linalg.h
/**
 * Copyright (c) 2020, Massachusetts Institute of Technology,
 * Cambridge, MA 02139
 * All Rights Reserved
 * Authors: Jingnan Shi, et al. (see THANKS for the full author list)
 * See LICENSE for the license information
 */

#pragma once

#include <cassert>
#include <iostream>

#include <Eigen/Core>
#include <Eigen/SparseCore>
#include <Eigen/Eigenvalues>

namespace teaser {

/**
 * Return the hat map of the provided vector (a skew symmetric matrix).
 * @param u 3-by-1 vector
 * @return 3-by-3 skew symmetric matrix
 */
Eigen::Matrix<double, 3, 3> hatmap(const Eigen::Matrix<double, 3, 1>& u) {
  Eigen::Matrix<double, 3, 3> x;
  // clang-format off
  x <<     0, -u(2),  u(1),
        u(2),     0, -u(0),
       -u(1),  u(0),     0;
  // clang-format on
  return x;
}

/**
 * Vector-vector Kronecker product function with fixed-size output
 * @tparam NumT numerical type for Eigen matrices (double, float, etc.)
 * @tparam N size of the first vector
 * @tparam M size of the second vector
 * @param v1 [in] first vector
 * @param v2 [in] second vector
 * @param output [out] output vector
 */
template <typename NumT, int N, int M>
void vectorKron(const Eigen::Matrix<NumT, N, 1>& v1, const Eigen::Matrix<NumT, M, 1>& v2,
                Eigen::Matrix<NumT, N * M, 1>* output) {
#pragma omp parallel for collapse(2) shared(v1, v2, output) default(none)
  for (size_t i = 0; i < N; ++i) {
    for (size_t j = 0; j < M; ++j) {
      (*output)[i * M + j] = v1[i] * v2[j];
    }
  }
}

/**
 * Vector-vector Kronecker product function with dynamic-size output
 * @tparam NumT numerical type for Eigen matrices (double, float, etc.)
 * @param v1 [in] first vector
 * @param v2 [in] second vector
 * @return result of the Kronecker product
 */
template <typename NumT, int N, int M>
Eigen::Matrix<NumT, Eigen::Dynamic, 1> vectorKron(const Eigen::Matrix<NumT, N, 1>& v1,
                                                  const Eigen::Matrix<NumT, M, 1>& v2) {
  Eigen::Matrix<NumT, Eigen::Dynamic, 1> output(v1.rows() * v2.rows(), 1);
#pragma omp parallel for collapse(2) shared(v1, v2, output) default(none)
  for (int i = 0; i < v1.rows(); ++i) {
    for (int j = 0; j < v2.rows(); ++j) {
      output[i * v2.rows() + j] = v1[i] * v2[j];
    }
  }
  return output;
}

/**
 * Find the nearest (in Frobenius norm) symmetric positive semi-definite matrix to A
 *
 * See: https://www.sciencedirect.com/science/article/pii/0024379588902236
 *
 * @tparam NumT numerical type for Eigen matrices (double, float, etc.)
 * @param A [in] input matrix
 * @param nearestPSD [out] output nearest positive semi-definite matrix
 */
template <typename NumT>
void getNearestPSD(const Eigen::Matrix<NumT, Eigen::Dynamic, Eigen::Dynamic>& A,
                   Eigen::Matrix<NumT, Eigen::Dynamic, Eigen::Dynamic>* nearestPSD) {
  assert(A.rows() == A.cols());
  nearestPSD->resize(A.rows(), A.cols());

  // symmetrize A into B
  Eigen::MatrixXd B = (A + A.transpose()) / 2;

  // eigendecomposition of B
  Eigen::SelfAdjointEigenSolver<Eigen::MatrixXd> eig_B(B);
  Eigen::VectorXd De = eig_B.eigenvalues();

  // clamp negative eigenvalues to zero and reassemble
  Eigen::MatrixXd De_positive = (De.array() < 0).select(0, De).asDiagonal();
  Eigen::MatrixXd Ve = eig_B.eigenvectors();

  *nearestPSD = Ve * De_positive * Ve.transpose();
}

} // namespace teaser
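A minimal usage sketch for the header above. The driver, the sample values, and the Eigen/Geometry include (needed for cross()) are illustrative additions, not part of TEASER:

// Illustrative driver for linalg.h (assumed include path and sample values).
#include <iostream>
#include <Eigen/Core>
#include <Eigen/Geometry>
#include "linalg.h"

int main() {
  Eigen::Vector3d u(1.0, 2.0, 3.0);
  Eigen::Vector3d v(0.5, -1.0, 2.0);

  // hatmap: u x v == hatmap(u) * v for any v
  Eigen::Matrix3d U = teaser::hatmap(u);
  std::cout << (U * v - u.cross(v)).norm() << "\n"; // ~0

  // Fixed-size Kronecker product of two 3-vectors -> 9-vector
  Eigen::Matrix<double, 9, 1> kron;
  teaser::vectorKron<double, 3, 3>(u, v, &kron);

  // Project a symmetric-but-indefinite matrix onto the PSD cone
  Eigen::MatrixXd A(2, 2);
  A << 1.0, 2.0,
       2.0, -3.0;
  Eigen::MatrixXd A_psd;
  teaser::getNearestPSD<double>(A, &A_psd);
  std::cout << A_psd << "\n";
  return 0;
}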
askewBD2.c
#include <R.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <x86intrin.h>
#include <omp.h>

#define min(A,B) ((A)<(B) ? (A) : (B))
#define max(A,B) ((A)>(B) ? (A) : (B))
#define Data(i,j)   data[(j) * n + (i)]    /* R uses column-major order */
#define Sorted(i,j) sorted[(j) * n + (i)]
#define Idx(i,j)    idx[(j) * n + (i)]
#define Rank(i,j)   rank[(j) * n + (i)]
#define Ticks(t,j)  ticks[(j) * numTick + (t)]
#define TICK 7
#define OneTick 128      /* 2^TICK functions per tick */
#define WORD 6           /* uint64_t, 2^6 */
#define OneWord 64
#define MASK 0x0000003F  /* 6 1's */
#define ONES 0xFFFFFFFFFFFFFFFFull /* 64 1's */

struct node {
   double val;
   unsigned idx;
};
typedef struct node node;

static node *toSort;
static unsigned n, m, numThread, *idx, *rank, numTick, vecLen;
static uint64_t *count;
static double *sorted;
static uint64_t **ticks;    /* numTick * m */
static unsigned twoAs;      /* 2^m */
static uint64_t ***bitVec;  /* twoAs */
static uint64_t **EQ, **GE, **LE;
static unsigned **cnt;
static uint64_t oneBit[64]; /* oneBit[0] 0001, oneBit[1] 0010,
                               oneBit[2] 0100, oneBit[3] 1000 */

static int cmpNode(const void *a, const void *b) {
   double x, y;
   x = ((node*) a)->val;
   y = ((node*) b)->val;
   /* descending order */
   if (x > y) return -1;
   if (x < y) return 1;
   return 0;
}

static void sortFunc(double *data) {
   unsigned i, j;
   for (j = 0; j < m; j++) {
      for (i = 0; i < n; i++) {
         toSort[i].val = Data(i, j);
         toSort[i].idx = i;
      }
      qsort((void *) toSort, n, sizeof(node), cmpNode);
      for (i = 0; i < n; i++) {
         Idx(i, j) = toSort[i].idx;    /* which row has rank i at col j */
         Sorted(i, j) = toSort[i].val; /* func value at rank i, col j */
         Rank(toSort[i].idx, j) = i;   /* rank of row i at col j */
      }
   }
}

static void initTick(void) {
   unsigned i, j, k, t, id, w, o;
   for (j = 0; j < m; j++) {
      for (k = 0; k < vecLen; k++) Ticks(0, j)[k] = 0;
      i = 0;
      for (t = 1; t < numTick; t++) {
         for (k = 0; k < vecLen; k++) Ticks(t, j)[k] = Ticks(t - 1, j)[k];
         while (i < t * OneTick) {
            id = Idx(i, j), w = id >> WORD, o = id & MASK;
            Ticks(t, j)[w] |= oneBit[o];
            i++;
         }
      }
   }
}

static void calcDepth(void) {
   /* f func, r rank, c col, t tick, v vector, w word, o offset */
   unsigned f, k, r, c, t, v, id, w, o, threadID, cntEQ;
   int i;
   /* v must be private as well; leaving it shared is a data race */
   #pragma omp parallel for private(i,k,r,c,t,v,id,w,o,threadID,cntEQ)
   for (f = 0; f < n; f++) {
      threadID = omp_get_thread_num();
      for (v = 0; v < twoAs; v++)
         for (k = 0; k < vecLen; k++) bitVec[threadID][v][k] = ONES;
      for (k = 0; k < vecLen; k++) EQ[threadID][k] = ONES;
      for (c = 0; c < m; c++) {
         r = Rank(f, c);
         t = r >> TICK;
         for (k = 0; k < vecLen; k++) GE[threadID][k] = Ticks(t, c)[k];
         i = t << TICK;
         while (i < n && Sorted(i, c) >= Sorted(r, c)) {
            id = Idx(i, c), w = id >> WORD, o = id & MASK;
            GE[threadID][w] |= oneBit[o];
            i++;
         }
         for (k = 0; k < vecLen; k++) LE[threadID][k] = ~GE[threadID][k];
         i--;
         while (i >= 0 && Sorted(i, c) <= Sorted(r, c)) {
            id = Idx(i, c), w = id >> WORD, o = id & MASK;
            LE[threadID][w] |= oneBit[o];
            i--;
         }
         for (v = 0; v < twoAs; v++)
            for (k = 0; k < vecLen; k++)
               bitVec[threadID][v][k] &=
                  ((v & oneBit[c]) ? GE[threadID][k] : LE[threadID][k]);
         for (k = 0; k < vecLen; k++)
            EQ[threadID][k] &= GE[threadID][k] & LE[threadID][k];
      }
      cntEQ = 0;
      for (k = 0; k < vecLen; k++) cntEQ += _mm_popcnt_u64(EQ[threadID][k]);
      for (v = 0; v < twoAs; v++) {
         cnt[threadID][v] = 0;
         for (k = 0; k < vecLen; k++)
            cnt[threadID][v] += _mm_popcnt_u64(bitVec[threadID][v][k]);
         cnt[threadID][v] -= cntEQ;
      }
      /* fictitious functions with indices >= n are less than all functions */
      cnt[threadID][0] -= ((vecLen << WORD) - n);
      count[f] = (n - cntEQ) * cntEQ + cntEQ * (cntEQ - 1) / 2;
      for (v = 0; v < twoAs >> 1; v++)
         count[f] += cnt[threadID][v] * cnt[threadID][twoAs - 1 - v];
   }
}

void askewBD2(int *row, int *col, double *data, double *depth) {
   unsigned i, j;
   n = *row;
   m = *col;
   if (n <= OneTick) {
      fprintf(stderr, "minimum %u rows\n", OneTick + 1);
      exit(1);
   }
   numThread = omp_get_max_threads();
   omp_set_num_threads(numThread);
   count  = (uint64_t*) malloc(sizeof(uint64_t) * n);
   toSort = (node*)     malloc(sizeof(node) * n);
   sorted = (double*)   malloc(sizeof(double) * n * m);
   idx    = (unsigned*) malloc(sizeof(unsigned) * n * m);
   rank   = (unsigned*) malloc(sizeof(unsigned) * n * m);
   oneBit[0] = 1;
   for (i = 1; i < 64; i++) oneBit[i] = oneBit[i - 1] << 1;
   twoAs = 1 << m;
   /* one tick mark every 128 functions */
   numTick = (n >> TICK) + 1;
   ticks = (uint64_t**) malloc(sizeof(uint64_t*) * numTick * m);
   /* num of 64-bit words */
   vecLen = (n >> WORD) + 1;
   for (i = 0; i < numTick * m; i++)
      ticks[i] = (uint64_t*) malloc(sizeof(uint64_t) * vecLen);
   bitVec = (uint64_t***) malloc(sizeof(uint64_t**) * numThread);
   for (i = 0; i < numThread; i++) {
      bitVec[i] = (uint64_t**) malloc(sizeof(uint64_t*) * twoAs);
      for (j = 0; j < twoAs; j++)
         bitVec[i][j] = (uint64_t*) malloc(sizeof(uint64_t) * vecLen);
   }
   GE  = (uint64_t**) malloc(sizeof(uint64_t*) * numThread);
   LE  = (uint64_t**) malloc(sizeof(uint64_t*) * numThread);
   EQ  = (uint64_t**) malloc(sizeof(uint64_t*) * numThread);
   cnt = (unsigned**) malloc(sizeof(unsigned*) * numThread);
   for (i = 0; i < numThread; i++) {
      GE[i]  = (uint64_t*) malloc(sizeof(uint64_t) * vecLen);
      LE[i]  = (uint64_t*) malloc(sizeof(uint64_t) * vecLen);
      EQ[i]  = (uint64_t*) malloc(sizeof(uint64_t) * vecLen);
      cnt[i] = (unsigned*) malloc(sizeof(unsigned) * twoAs);
   }
   sortFunc(data);
   initTick();
   calcDepth();
   for (i = 0; i < n; i++)
      depth[i] = (double) count[i] / (n * (n - 1.0) / 2.0);
   free(count); free(toSort); free(sorted); free(idx); free(rank);
   for (i = 0; i < numTick * m; i++) free(ticks[i]);
   free(ticks);
   for (i = 0; i < numThread; i++) {
      for (j = 0; j < twoAs; j++) free(bitVec[i][j]);
      free(bitVec[i]);
   }
   free(bitVec);
   for (i = 0; i < numThread; i++) {
      free(GE[i]); free(LE[i]); free(EQ[i]); free(cnt[i]);
   }
   free(GE); free(LE); free(EQ); free(cnt);
}
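The entry point above is shaped for R's .C() interface (scalar arguments passed by pointer, column-major data). An illustrative standalone driver, assuming the C file compiles against the R headers; the file names, flags, and synthetic data are assumptions:

// Hypothetical driver, e.g.:
//   g++ -O2 -fopenmp -mpopcnt driver.cpp askewBD2.c -I$R_INCLUDE_DIR
#include <cstdio>
#include <cstdlib>
#include <vector>

extern "C" void askewBD2(int *row, int *col, double *data, double *depth);

int main() {
  int n = 200, m = 3;                  // the code requires n > 128 rows
  std::vector<double> data(n * m), depth(n);
  for (int j = 0; j < m; j++)          // column-major, as R stores matrices
    for (int i = 0; i < n; i++)
      data[j * n + i] = std::rand() / (double)RAND_MAX;
  askewBD2(&n, &m, data.data(), depth.data());
  for (int i = 0; i < 5; i++)
    std::printf("depth[%d] = %f\n", i, depth[i]);
  return 0;
}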
5.race1.c
// RUN: clang %loadLLOV %s -o /dev/null 2>&1 | FileCheck %s

#include <omp.h>
#define N 20

int main() {
  int A[N][N];
#pragma omp parallel for
  for (int i = 1; i < N; i++)
    for (int j = 0; j < N; j++)
      // Array Out of Bound Access
      A[i][j] = A[i][j - 1];
}
// CHECK: Data Race detected
// END
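The race the test expects arises because the out-of-bounds read A[i][-1] aliases A[i-1][N-1] in memory, so a thread working on row i reads an element another thread writes. For contrast, an illustrative race-free variant (not part of the test suite) starts j at 1, keeping each outer iteration inside its own row:

#include <omp.h>
#define N 20

int main() {
  int A[N][N] = {{0}};
#pragma omp parallel for
  for (int i = 1; i < N; i++)
    for (int j = 1; j < N; j++)   // j >= 1: no cross-row aliasing, no race
      A[i][j] = A[i][j - 1];      // dependence now stays within one thread
}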
blackscholes.c
// Copyright (c) 2007 Intel Corp.

// Black-Scholes
// Analytical method for calculating European Options
//
// Reference Source: Options, Futures, and Other Derivatives, 3rd Edition,
// Prentice Hall, John C. Hull,

#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>

#ifdef ENABLE_PARSEC_HOOKS
#include <hooks.h>
#endif

// Multi-threaded pthreads header
#ifdef ENABLE_THREADS
#define MAX_THREADS 128
// Add the following line so that icc 9.0 is compatible with pthread lib.
#define __thread __threadp
MAIN_ENV
#undef __thread
#endif

// Multi-threaded OpenMP header
#ifdef ENABLE_OPENMP
#include <omp.h>
#endif

// Multi-threaded header for Windows
#ifdef WIN32
#pragma warning(disable : 4305)
#pragma warning(disable : 4244)
#include <windows.h>
#define MAX_THREADS 128
#endif

// Precision to use for calculations
#define fptype float

#define NUM_RUNS 100

typedef struct OptionData_ {
    fptype s;          // spot price
    fptype strike;     // strike price
    fptype r;          // risk-free interest rate
    fptype divq;       // dividend rate
    fptype v;          // volatility
    fptype t;          // time to maturity or option expiration in years
                       // (1yr = 1.0, 6mos = 0.5, 3mos = 0.25, ..., etc)
    char OptionType;   // Option type. "P"=PUT, "C"=CALL
    fptype divs;       // dividend vals (not used in this test)
    fptype DGrefval;   // DerivaGem Reference Value
} OptionData;

OptionData *data;
fptype *prices;
int numOptions;

int    *otype;
fptype *sptprice;
fptype *strike;
fptype *rate;
fptype *volatility;
fptype *otime;
int numError = 0;
int nThreads;

////////////////////////////////////////////////////////////////////////////////
// Cumulative Normal Distribution Function
// See Hull, Section 11.8, P.243-244
#define inv_sqrt_2xPI 0.39894228040143270286

fptype CNDF ( fptype InputX )
{
    int sign;

    fptype OutputX;
    fptype xInput;
    fptype xNPrimeofX;
    fptype expValues;
    fptype xK2;
    fptype xK2_2, xK2_3;
    fptype xK2_4, xK2_5;
    fptype xLocal, xLocal_1;
    fptype xLocal_2, xLocal_3;

    // Check for negative value of InputX
    if (InputX < 0.0) {
        InputX = -InputX;
        sign = 1;
    } else
        sign = 0;

    xInput = InputX;

    // Compute NPrimeX term common to both four & six decimal accuracy calcs
    expValues = exp(-0.5f * InputX * InputX);
    xNPrimeofX = expValues;
    xNPrimeofX = xNPrimeofX * inv_sqrt_2xPI;

    xK2 = 0.2316419 * xInput;
    xK2 = 1.0 + xK2;
    xK2 = 1.0 / xK2;
    xK2_2 = xK2 * xK2;
    xK2_3 = xK2_2 * xK2;
    xK2_4 = xK2_3 * xK2;
    xK2_5 = xK2_4 * xK2;

    xLocal_1 = xK2 * 0.319381530;
    xLocal_2 = xK2_2 * (-0.356563782);
    xLocal_3 = xK2_3 * 1.781477937;
    xLocal_2 = xLocal_2 + xLocal_3;
    xLocal_3 = xK2_4 * (-1.821255978);
    xLocal_2 = xLocal_2 + xLocal_3;
    xLocal_3 = xK2_5 * 1.330274429;
    xLocal_2 = xLocal_2 + xLocal_3;

    xLocal_1 = xLocal_2 + xLocal_1;
    xLocal   = xLocal_1 * xNPrimeofX;
    xLocal   = 1.0 - xLocal;

    OutputX  = xLocal;

    if (sign) {
        OutputX = 1.0 - OutputX;
    }

    return OutputX;
}

// For debugging
void print_xmm(fptype in, char* s) {
    printf("%s: %f\n", s, in);
}

//////////////////////////////////////////////////////////////////////////////////////
fptype BlkSchlsEqEuroNoDiv( fptype sptprice,
                            fptype strike, fptype rate, fptype volatility,
                            fptype time, int otype, float timet )
{
    fptype OptionPrice;

    // local private working variables for the calculation
    fptype xStockPrice;
    fptype xStrikePrice;
    fptype xRiskFreeRate;
    fptype xVolatility;
    fptype xTime;
    fptype xSqrtTime;

    fptype logValues;
    fptype xLogTerm;
    fptype xD1;
    fptype xD2;
    fptype xPowerTerm;
    fptype xDen;
    fptype d1;
    fptype d2;
    fptype FutureValueX;
    fptype NofXd1;
    fptype NofXd2;
    fptype NegNofXd1;
    fptype NegNofXd2;

    xStockPrice = sptprice;
    xStrikePrice = strike;
    xRiskFreeRate = rate;
    xVolatility = volatility;

    xTime = time;
    xSqrtTime = sqrt(xTime);

    logValues = log( sptprice / strike );

    xLogTerm = logValues;

    xPowerTerm = xVolatility * xVolatility;
    xPowerTerm = xPowerTerm * 0.5;

    xD1 = xRiskFreeRate + xPowerTerm;
    xD1 = xD1 * xTime;
    xD1 = xD1 + xLogTerm;

    xDen = xVolatility * xSqrtTime;
    xD1 = xD1 / xDen;
    xD2 = xD1 - xDen;

    d1 = xD1;
    d2 = xD2;

    NofXd1 = CNDF( d1 );
    NofXd2 = CNDF( d2 );

    FutureValueX = strike * ( exp( -(rate)*(time) ) );
    if (otype == 0) {
        OptionPrice = (sptprice * NofXd1) - (FutureValueX * NofXd2);
    } else {
        NegNofXd1 = (1.0 - NofXd1);
        NegNofXd2 = (1.0 - NofXd2);
        OptionPrice = (FutureValueX * NegNofXd2) - (sptprice * NegNofXd1);
    }

    return OptionPrice;
}

//////////////////////////////////////////////////////////////////////////////////////
#ifdef WIN32
DWORD WINAPI bs_thread(LPVOID tid_ptr){
#else
int bs_thread(void *tid_ptr) {
#endif
    int i, j;
    fptype price;
    fptype priceDelta;
    int tid = *(int *)tid_ptr;
    int start = tid * (numOptions / nThreads);
    int end = start + (numOptions / nThreads);

    for (j=0; j<NUM_RUNS; j++) {
#ifdef ENABLE_OPENMP
#pragma omp parallel for private(i, price, priceDelta)
        for (i=0; i<numOptions; i++) {
#else //ENABLE_OPENMP
        for (i=start; i<end; i++) {
#endif //ENABLE_OPENMP
            /* Calling main function to calculate option value based on
             * Black & Scholes's equation.
             */
            price = BlkSchlsEqEuroNoDiv( sptprice[i], strike[i],
                                         rate[i], volatility[i], otime[i],
                                         otype[i], 0);
            prices[i] = price;

#ifdef ERR_CHK
            priceDelta = data[i].DGrefval - price;
            if( fabs(priceDelta) >= 1e-4 ){
                printf("Error on %d. Computed=%.5f, Ref=%.5f, Delta=%.5f\n",
                       i, price, data[i].DGrefval, priceDelta);
                numError ++;
            }
#endif
        }
    }

    return 0;
}

int main (int argc, char **argv)
{
    FILE *file;
    int i;
    int loopnum;
    fptype * buffer;
    int * buffer2;
    int rv;

#ifdef PARSEC_VERSION
#define __PARSEC_STRING(x) #x
#define __PARSEC_XSTRING(x) __PARSEC_STRING(x)
    printf("PARSEC Benchmark Suite Version " __PARSEC_XSTRING(PARSEC_VERSION) "\n");
    fflush(NULL);
#else
    printf("PARSEC Benchmark Suite\n");
    fflush(NULL);
#endif //PARSEC_VERSION

#ifdef ENABLE_PARSEC_HOOKS
    __parsec_bench_begin(__parsec_blackscholes);
#endif

    if (argc != 4) {
        printf("Usage:\n\t%s <nthreads> <inputFile> <outputFile>\n", argv[0]);
        exit(1);
    }
    nThreads = atoi(argv[1]);
    char *inputFile = argv[2];
    char *outputFile = argv[3];

    // Read input data from file
    file = fopen(inputFile, "r");
    if(file == NULL) {
        printf("ERROR: Unable to open file `%s'.\n", inputFile);
        exit(1);
    }
    rv = fscanf(file, "%i", &numOptions);
    if(rv != 1) {
        printf("ERROR: Unable to read from file `%s'.\n", inputFile);
        fclose(file);
        exit(1);
    }
    if(nThreads > numOptions) {
        printf("WARNING: Not enough work, reducing number of threads to match number of options.\n");
        nThreads = numOptions;
    }

#if !defined(ENABLE_THREADS) && !defined(ENABLE_OPENMP)
    if(nThreads != 1) {
        printf("Error: <nthreads> must be 1 (serial version)\n");
        exit(1);
    }
#endif

    // alloc spaces for the option data
    data = (OptionData*)malloc(numOptions*sizeof(OptionData));
    prices = (fptype*)malloc(numOptions*sizeof(fptype));
    for ( loopnum = 0; loopnum < numOptions; ++ loopnum )
    {
        rv = fscanf(file, "%f %f %f %f %f %f %c %f %f", &data[loopnum].s,
                    &data[loopnum].strike, &data[loopnum].r, &data[loopnum].divq,
                    &data[loopnum].v, &data[loopnum].t, &data[loopnum].OptionType,
                    &data[loopnum].divs, &data[loopnum].DGrefval);
        if(rv != 9) {
            printf("ERROR: Unable to read from file `%s'.\n", inputFile);
            fclose(file);
            exit(1);
        }
    }
    rv = fclose(file);
    if(rv != 0) {
        printf("ERROR: Unable to close file `%s'.\n", inputFile);
        exit(1);
    }

#ifdef ENABLE_THREADS
    MAIN_INITENV(,8000000,nThreads);
#endif
    printf("Num of Options: %d\n", numOptions);
    printf("Num of Runs: %d\n", NUM_RUNS);

#define PAD 256
#define LINESIZE 64

    buffer = (fptype *) malloc(5 * numOptions * sizeof(fptype) + PAD);
    sptprice = (fptype *) (((unsigned long long)buffer + PAD) & ~(LINESIZE - 1));
    strike = sptprice + numOptions;
    rate = strike + numOptions;
    volatility = rate + numOptions;
    otime = volatility + numOptions;

    buffer2 = (int *) malloc(numOptions * sizeof(fptype) + PAD);
    otype = (int *) (((unsigned long long)buffer2 + PAD) & ~(LINESIZE - 1));

    for (i=0; i<numOptions; i++) {
        otype[i]      = (data[i].OptionType == 'P') ? 1 : 0;
        sptprice[i]   = data[i].s;
        strike[i]     = data[i].strike;
        rate[i]       = data[i].r;
        volatility[i] = data[i].v;
        otime[i]      = data[i].t;
    }

    printf("Size of data: %zu\n", numOptions * (sizeof(OptionData) + sizeof(int)));

#ifdef ENABLE_PARSEC_HOOKS
    __parsec_roi_begin();
#endif

#ifdef ENABLE_THREADS
    int tids[nThreads];
    for(i=0; i<nThreads; i++) {
        tids[i]=i;
        CREATE_WITH_ARG(bs_thread, &tids[i]);
    }
    WAIT_FOR_END(nThreads);
#else //ENABLE_THREADS
#ifdef ENABLE_OPENMP
    {
        int tid=0;
        omp_set_num_threads(nThreads);
        bs_thread(&tid);
    }
#else //ENABLE_OPENMP
#ifdef WIN32
    if (nThreads > 1)
    {
        HANDLE threads[MAX_THREADS];
        int nums[MAX_THREADS];
        for(i=0; i<nThreads; i++) {
            nums[i] = i;
            threads[i] = CreateThread(0, 0, bs_thread, &nums[i], 0, 0);
        }
        WaitForMultipleObjects(nThreads, threads, TRUE, INFINITE);
    } else
#endif
    {
        int tid=0;
        bs_thread(&tid);
    }
#endif //ENABLE_OPENMP
#endif //ENABLE_THREADS

#ifdef ENABLE_PARSEC_HOOKS
    __parsec_roi_end();
#endif

    // Write prices to output file
    file = fopen(outputFile, "w");
    if(file == NULL) {
        printf("ERROR: Unable to open file `%s'.\n", outputFile);
        exit(1);
    }
    rv = fprintf(file, "%i\n", numOptions);
    if(rv < 0) {
        printf("ERROR: Unable to write to file `%s'.\n", outputFile);
        fclose(file);
        exit(1);
    }
    for(i=0; i<numOptions; i++) {
        rv = fprintf(file, "%.18f\n", prices[i]);
        if(rv < 0) {
            printf("ERROR: Unable to write to file `%s'.\n", outputFile);
            fclose(file);
            exit(1);
        }
    }
    rv = fclose(file);
    if(rv != 0) {
        printf("ERROR: Unable to close file `%s'.\n", outputFile);
        exit(1);
    }

#ifdef ERR_CHK
    printf("Num Errors: %d\n", numError);
#endif
    free(data);
    free(prices);

#ifdef ENABLE_PARSEC_HOOKS
    __parsec_bench_end();
#endif

    return 0;
}
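A reference sketch (not from PARSEC) of the closed-form prices that BlkSchlsEqEuroNoDiv computes, using Phi(x) = 0.5 * erfc(-x / sqrt(2)) for the normal CDF that CNDF approximates via Hull's polynomial; the sample inputs are assumed values in the input-file column order:

#include <cstdio>
#include <cmath>

// Standard normal CDF via the complementary error function.
static double Phi(double x) { return 0.5 * std::erfc(-x / std::sqrt(2.0)); }

// d1 = (ln(S/K) + (r + v^2/2) T) / (v sqrt(T)),  d2 = d1 - v sqrt(T)
// call = S Phi(d1) - K e^{-rT} Phi(d2),  put = K e^{-rT} Phi(-d2) - S Phi(-d1)
static double black_scholes(double S, double K, double r, double v, double T,
                            int put) {
  double d1 = (std::log(S / K) + (r + 0.5 * v * v) * T) / (v * std::sqrt(T));
  double d2 = d1 - v * std::sqrt(T);
  double fv = K * std::exp(-r * T);   // discounted strike (FutureValueX above)
  return put ? fv * Phi(-d2) - S * Phi(-d1)
             : S * Phi(d1) - fv * Phi(d2);
}

int main() {
  // spot, strike, rate, volatility, time (assumed sample values)
  std::printf("call = %.6f\n", black_scholes(100.0, 100.0, 0.05, 0.2, 1.0, 0)); // ~10.45
  std::printf("put  = %.6f\n", black_scholes(100.0, 100.0, 0.05, 0.2, 1.0, 1)); // ~5.57
  return 0;
}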
CRSMatrixDiagPrecond.h
#ifndef CRSMATRIXDIAGPRECOND_H
#define CRSMATRIXDIAGPRECOND_H

#ifdef _OPENMP
#include <omp.h>
#endif

#include "CRSMatrix.h"
#include "sparse_io.h"
#include "../Preconditioner/generateDiagPrecond.h"

namespace MathLib {

#ifdef MSVC_VER
#pragma warning(push)
#pragma warning(disable: 4018)
#endif

/**
 * Class CRSMatrixDiagPrecond represents a matrix in compressed row storage
 * format associated with a diagonal preconditioner.
 *
 * The user can either read the matrix from a file or pass the corresponding
 * arrays to the alternative constructor. In both cases the user has to
 * compute the preconditioner explicitly via the calcPrecond() method!
 */
class CRSMatrixDiagPrecond : public CRSMatrix<double, unsigned>
{
public:
    /**
     * Constructor takes a file name. The file is read in binary format
     * by the constructor of the base class (template) CRSMatrix.
     * For further details see the function CS_read().
     *
     * The user has to compute the preconditioner explicitly via the
     * calcPrecond() method!
     *
     * @param fname the name of the file that contains the matrix in
     * binary compressed row storage format
     */
    CRSMatrixDiagPrecond(std::string const &fname)
        : CRSMatrix<double, unsigned>(fname), _inv_diag(NULL)
    {}

    /**
     * Constructs a matrix object from given data.
     *
     * The user has to compute the preconditioner explicitly via the
     * calcPrecond() method!
     * @param n number of rows / columns of the matrix
     * @param iA row pointer of matrix in compressed row storage format
     * @param jA column index of matrix in compressed row storage format
     * @param A data entries of matrix in compressed row storage format
     */
    CRSMatrixDiagPrecond(unsigned n, unsigned *iA, unsigned *jA, double* A)
        : CRSMatrix<double, unsigned>(n, iA, jA, A), _inv_diag(NULL)
    {}

    void calcPrecond()
    {
        if (_inv_diag != NULL)
            delete [] _inv_diag;
        _inv_diag = new double[_n_rows];

        if (!generateDiagPrecond(_n_rows, _row_ptr, _col_idx, _data, _inv_diag)) {
            std::cout << "Could not create diagonal preconditioner" << std::endl;
        }
//      if (!generateDiagPrecondRowSum(_n_rows, _row_ptr, _data, _inv_diag)) {
//          std::cout << "Could not create diagonal preconditioner" << std::endl;
//      }
//      if (!generateDiagPrecondRowMax(_n_rows, _row_ptr, _data, _inv_diag)) {
//          std::cout << "Could not create diagonal preconditioner" << std::endl;
//      }
    }

    void precondApply(double* x) const
    {
        OPENMP_LOOP_TYPE k;
//      #pragma omp parallel for
        for (k = 0; k < _n_rows; ++k) {
            x[k] = _inv_diag[k] * x[k];
        }
    }

    ~CRSMatrixDiagPrecond()
    {
        delete [] _inv_diag;
    }

private:
    double *_inv_diag;
};

#ifdef MSVC_VER
#pragma warning(pop)
#endif

} // namespace MathLib

#endif
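A self-contained sketch of the idea behind calcPrecond()/precondApply(), independent of the MathLib classes: a diagonal (Jacobi) preconditioner stores 1/a_ii per row of a CRS matrix and is applied as a pointwise scaling x <- D^{-1} x. The matrix values below are made up for illustration:

#include <cstdio>
#include <vector>

int main() {
  // 3x3 CRS matrix: [4 1 0; 1 3 0; 0 0 2]
  std::vector<unsigned> row_ptr = {0, 2, 4, 5};
  std::vector<unsigned> col_idx = {0, 1, 0, 1, 2};
  std::vector<double>   val     = {4, 1, 1, 3, 2};

  unsigned n = 3;
  std::vector<double> inv_diag(n, 0.0);
  for (unsigned r = 0; r < n; ++r)              // find a_rr in each row (calcPrecond)
    for (unsigned k = row_ptr[r]; k < row_ptr[r + 1]; ++k)
      if (col_idx[k] == r) inv_diag[r] = 1.0 / val[k];

  double x[3] = {8.0, 9.0, 4.0};
  for (unsigned r = 0; r < n; ++r)              // pointwise scaling (precondApply)
    x[r] = inv_diag[r] * x[r];
  std::printf("%g %g %g\n", x[0], x[1], x[2]);  // prints: 2 3 2
  return 0;
}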
25_omp_stack.c
// clang-format off
// RUN: %run %s --omp 2>&1 | FileCheck %s --check-prefix=CHECK-TSAN
// RUN: %run %s --omp 2>&1 | FileCheck %s
// REQUIRES: openmp
// clang-format on

void f() {
  char c[4];
  double d = 5;
}

int main(int argc, char** argv) {
  // CHECK: [Trace] TypeART Runtime Trace
#pragma omp parallel sections
  {
#pragma omp section
    f();
#pragma omp section
    f();
  }
  // CHECK-TSAN-NOT: ThreadSanitizer
  // CHECK-NOT: Error
  // CHECK: [Trace] Free 0x{{.*}} 0 int8 1 4
  // CHECK-DAG: [Trace] Free 0x{{.*}} 6 double 8 1
  // CHECK-DAG: [Trace] Free 0x{{.*}} 0 int8 1 4
  // CHECK-DAG: [Trace] Free 0x{{.*}} 6 double 8 1
  return 0;
}
gameoflife.c
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#ifdef MPI
#include <mpi.h>
#endif
#ifdef OpenMP
#include <omp.h>
#endif

#define ALIVE 1
#define DEAD 0

/********************************************
 * Need at least this many rows and columns *
 ********************************************/
const int MINIMUM_ROWS = 1;
const int MINIMUM_COLUMNS = 1;
const int MINIMUM_TIME_STEPS = 1;

/*****************************************************
 * Add an "s" to the end of a value's name if needed *
 *****************************************************/
void pluralize_value_if_needed(int value) {
    if(value != 1)
        fprintf(stderr, "s");
    return;
}

/*******************************************************************************
 * Make sure a value is >= another value, print error and return -1 if it isn't
 ******************************************************************************/
int assert_minimum_value(char which_value[16], int actual_value,
        int expected_value) {
    int retval;

    if(actual_value < expected_value) {
        fprintf(stderr, "ERROR: %d %s", actual_value, which_value);
        pluralize_value_if_needed(actual_value);
        fprintf(stderr, "; need at least %d %s", expected_value, which_value);
        pluralize_value_if_needed(expected_value);
        fprintf(stderr, "\n");
        retval = -1;
    }
    else
        retval = 0;

    return retval;
}

/******************************************************************************
 * Print a function name and exit if the specified boolean expression is true *
 ******************************************************************************/
void exit_if(int boolean_expression, char function_name[32], int OUR_RANK) {
    if(boolean_expression) {
#ifdef MPI
        fprintf(stderr, "Rank %d ", OUR_RANK);
#endif
#ifdef OpenMP
        fprintf(stderr, "Thread %d ", omp_get_thread_num());
#endif
        fprintf(stderr, "ERROR in %s\n", function_name);
        exit(-1);
    }
    return;
}

/****************
 * Main program *
 ****************/
int main(int argc, char **argv) {
    int NUMBER_OF_ROWS = 5, NUMBER_OF_COLUMNS = 5, NUMBER_OF_TIME_STEPS = 5,
        OUR_NUMBER_OF_ROWS = 5, OUR_RANK = 0, NUMBER_OF_PROCESSES = 1,
        our_current_row, my_current_column, my_neighbor_row,
        my_neighbor_column, my_number_of_alive_neighbors, c, return_value,
        next_lowest_rank, next_highest_rank;
    int **our_current_grid, **our_next_grid;
    int current_time_step;
#ifdef SHOW_RESULTS
    int current_rank;
#endif

    /* I. Initialize the distributed memory environment */
#ifdef MPI
    exit_if((MPI_Init(&argc, &argv) != MPI_SUCCESS), "MPI_Init", OUR_RANK);
    exit_if((MPI_Comm_rank(MPI_COMM_WORLD, &OUR_RANK) != MPI_SUCCESS),
            "MPI_Comm_rank", OUR_RANK);
    exit_if((MPI_Comm_size(MPI_COMM_WORLD, &NUMBER_OF_PROCESSES) != MPI_SUCCESS),
            "MPI_Comm_size", OUR_RANK);
#endif

    /* II. Parse command line arguments */
    while((c = getopt(argc, argv, "r:c:t:")) != -1) {
        switch(c) {
            case 'r':
                NUMBER_OF_ROWS = atoi(optarg);
                break;
            case 'c':
                NUMBER_OF_COLUMNS = atoi(optarg);
                break;
            case 't':
                NUMBER_OF_TIME_STEPS = atoi(optarg);
                break;
            case '?':
            default:
#ifdef MPI
                fprintf(stderr, "Usage: mpirun -np NUMBER_OF_PROCESSES %s [-r NUMBER_OF_ROWS] [-c NUMBER_OF_COLUMNS] [-t NUMBER_OF_TIME_STEPS]\n", argv[0]);
#else
                fprintf(stderr, "Usage: %s [-r NUMBER_OF_ROWS] [-c NUMBER_OF_COLUMNS] [-t NUMBER_OF_TIME_STEPS]\n", argv[0]);
#endif
                exit(-1);
        }
    }
    argc -= optind;
    argv += optind;

    /* III. Make sure we have enough rows, columns, and time steps */
    return_value = assert_minimum_value("row", NUMBER_OF_ROWS, MINIMUM_ROWS);
    return_value += assert_minimum_value("column", NUMBER_OF_COLUMNS,
            MINIMUM_COLUMNS);
    return_value += assert_minimum_value("time step", NUMBER_OF_TIME_STEPS,
            MINIMUM_TIME_STEPS);

    /* IV. Exit if we don't */
    if(return_value != 0)
        exit(-1);

    /* V. Determine our number of rows */
    OUR_NUMBER_OF_ROWS = NUMBER_OF_ROWS / NUMBER_OF_PROCESSES;
    if(OUR_RANK == NUMBER_OF_PROCESSES - 1) {
        OUR_NUMBER_OF_ROWS += NUMBER_OF_ROWS % NUMBER_OF_PROCESSES;
    }

    /* VI. Allocate enough space in our current grid and next grid for the
     *  number of rows and the number of columns, plus the ghost rows
     *  and columns.  The row-pointer arrays need (rows + 2) entries of
     *  type int* (the original request used the wrong element type and
     *  an oversized count). */
    exit_if(((our_current_grid = (int**)malloc((OUR_NUMBER_OF_ROWS + 2)
                        * sizeof(int*))) == NULL),
            "malloc(our_current_grid)", OUR_RANK);
    exit_if(((our_next_grid = (int**)malloc((OUR_NUMBER_OF_ROWS + 2)
                        * sizeof(int*))) == NULL),
            "malloc(our_next_grid)", OUR_RANK);
    for(our_current_row = 0; our_current_row <= OUR_NUMBER_OF_ROWS + 1;
            our_current_row++) {
        exit_if(((our_current_grid[our_current_row]
                        = (int*)malloc((NUMBER_OF_COLUMNS + 2) * sizeof(int)))
                    == NULL),
                "malloc(our_current_grid[some_row])", OUR_RANK);
        exit_if(((our_next_grid[our_current_row]
                        = (int*)malloc((NUMBER_OF_COLUMNS + 2) * sizeof(int)))
                    == NULL),
                "malloc(our_next_grid[some_row])", OUR_RANK);
    }

    /* VII. Initialize the grid (each cell gets a random state) */
    for(our_current_row = 1; our_current_row <= OUR_NUMBER_OF_ROWS;
            our_current_row++) {
#ifdef OpenMP
#pragma omp parallel for private(my_current_column)
#endif
        for(my_current_column = 1; my_current_column <= NUMBER_OF_COLUMNS;
                my_current_column++) {
            our_current_grid[our_current_row][my_current_column] =
                random() % (ALIVE + 1);
        }
    }

    /* VIII. Determine the process with the next-lowest rank */
    if(OUR_RANK == 0)
        next_lowest_rank = NUMBER_OF_PROCESSES - 1;
    else
        next_lowest_rank = OUR_RANK - 1;

    /* IX. Determine the process with the next-highest rank */
    if(OUR_RANK == NUMBER_OF_PROCESSES - 1)
        next_highest_rank = 0;
    else
        next_highest_rank = OUR_RANK + 1;

    /* X. Run the simulation for the specified number of time steps */
    for(current_time_step = 0; current_time_step <= NUMBER_OF_TIME_STEPS - 1;
            current_time_step++) {
        /* X.A. Set up the ghost rows */
#ifdef MPI
        /* X.A.1. Send our second-from-the-top row to the process with the
         *  next-lowest rank */
        exit_if((MPI_Send(our_current_grid[1], NUMBER_OF_COLUMNS + 2,
                        MPI_INT, next_lowest_rank, 0, MPI_COMM_WORLD)
                    != MPI_SUCCESS), "MPI_Send(top row)", OUR_RANK);

        /* X.A.2. Send our second-from-the-bottom row to the process
         *  with the next-highest rank */
        exit_if((MPI_Send(our_current_grid[OUR_NUMBER_OF_ROWS],
                        NUMBER_OF_COLUMNS + 2, MPI_INT, next_highest_rank, 0,
                        MPI_COMM_WORLD) != MPI_SUCCESS),
                "MPI_Send(bottom row)", OUR_RANK);

        /* X.A.3. Receive our bottom row from the process with the
         *  next-highest rank */
        exit_if((MPI_Recv(our_current_grid[OUR_NUMBER_OF_ROWS + 1],
                        NUMBER_OF_COLUMNS + 2, MPI_INT, next_highest_rank, 0,
                        MPI_COMM_WORLD, MPI_STATUS_IGNORE) != MPI_SUCCESS),
                "MPI_Recv(bottom row)", OUR_RANK);

        /* X.A.4. Receive our top row from the process with the
         *  next-lowest rank */
        exit_if((MPI_Recv(our_current_grid[0], NUMBER_OF_COLUMNS + 2,
                        MPI_INT, next_lowest_rank, 0, MPI_COMM_WORLD,
                        MPI_STATUS_IGNORE) != MPI_SUCCESS),
                "MPI_Recv(top row)", OUR_RANK);
#else
#ifdef OpenMP
        /* "parallel for" (not a bare "parallel", which would make every
         *  thread execute the whole loop redundantly) */
#pragma omp parallel for private(my_current_column)
#endif
        for(my_current_column = 0; my_current_column <= NUMBER_OF_COLUMNS + 1;
                my_current_column++) {
            /* X.A.1. Set our top row to be the same as our second-to-last
             *  row */
            our_current_grid[0][my_current_column] =
                our_current_grid[OUR_NUMBER_OF_ROWS][my_current_column];

            /* X.A.2. Set our bottom row to be the same as our
             *  second-to-top row */
            our_current_grid[OUR_NUMBER_OF_ROWS + 1][my_current_column] =
                our_current_grid[1][my_current_column];
        }
#endif

        /* X.B. Set up the ghost columns */
        for(our_current_row = 0; our_current_row <= OUR_NUMBER_OF_ROWS + 1;
                our_current_row++) {
            /* X.B.1. The left ghost column is the same as the
             *  farthest-right, non-ghost column */
            our_current_grid[our_current_row][0] =
                our_current_grid[our_current_row][NUMBER_OF_COLUMNS];

            /* X.B.2. The right ghost column is the same as the
             *  farthest-left, non-ghost column */
            our_current_grid[our_current_row][NUMBER_OF_COLUMNS + 1] =
                our_current_grid[our_current_row][1];
        }

        /* X.C. Display our current grid */
#ifdef SHOW_RESULTS
        for(current_rank = 0; current_rank <= NUMBER_OF_PROCESSES - 1;
                current_rank++)
            printf("Time Step %d, Rank %d:\n", current_time_step, OUR_RANK);
        for(our_current_row = 0; our_current_row <= OUR_NUMBER_OF_ROWS + 1;
                our_current_row++) {
            if(our_current_row == 1) {
                for(my_current_column = 0;
                        my_current_column <= NUMBER_OF_COLUMNS + 1 + 2;
                        my_current_column++) {
                    printf("- ");
                }
                printf("\n");
            }
            for(my_current_column = 0;
                    my_current_column <= NUMBER_OF_COLUMNS + 1;
                    my_current_column++) {
                if(my_current_column == 1) {
                    printf("| ");
                }
                printf("%d ",
                        our_current_grid[our_current_row][my_current_column]);
                if(my_current_column == NUMBER_OF_COLUMNS) {
                    printf("| ");
                }
            }
            printf("\n");
            if(our_current_row == OUR_NUMBER_OF_ROWS) {
                for(my_current_column = 0;
                        my_current_column <= NUMBER_OF_COLUMNS + 1 + 2;
                        my_current_column++) {
                    printf("- ");
                }
                printf("\n");
            }
        }
#endif

        /* X.D. Determine our next grid -- for each row, do the following: */
        for(our_current_row = 1; our_current_row <= OUR_NUMBER_OF_ROWS;
                our_current_row++) {
            /* X.D.1. For each column, spawn threads to do the following: */
#ifdef OpenMP
#pragma omp parallel for private(my_current_column, my_neighbor_row, my_neighbor_column, my_number_of_alive_neighbors)
#endif
            for(my_current_column = 1;
                    my_current_column <= NUMBER_OF_COLUMNS;
                    my_current_column++) {
                /* X.D.1.a. Initialize the count of ALIVE neighbors to 0 */
                my_number_of_alive_neighbors = 0;

                /* X.D.1.b. For each row of the cell's neighbors, do the
                 *  following: */
                for(my_neighbor_row = our_current_row - 1;
                        my_neighbor_row <= our_current_row + 1;
                        my_neighbor_row++) {
                    /* X.D.1.b.i. For each column of the cell's neighbors,
                     *  do the following: */
                    for(my_neighbor_column = my_current_column - 1;
                            my_neighbor_column <= my_current_column + 1;
                            my_neighbor_column++) {
                        /* X.D.1.b.i.I. If the neighbor is not the cell
                         *  itself, and the neighbor is ALIVE, do the
                         *  following: */
                        if((my_neighbor_row != our_current_row
                                    || my_neighbor_column != my_current_column)
                                && (our_current_grid[my_neighbor_row]
                                    [my_neighbor_column] == ALIVE)) {
                            /* X.D.1.b.i.I.A. Add 1 to the count of the
                             *  number of ALIVE neighbors */
                            my_number_of_alive_neighbors++;
                        }
                    }
                }

                /* Default: the cell keeps its current state unless a rule
                 *  below fires (without this, a DEAD cell with exactly two
                 *  ALIVE neighbors would inherit a stale value left in the
                 *  next grid) */
                our_next_grid[our_current_row][my_current_column] =
                    our_current_grid[our_current_row][my_current_column];

                /* X.D.1.c. Apply Rule 1 of Conway's Game of Life */
                if(my_number_of_alive_neighbors < 2) {
                    our_next_grid[our_current_row][my_current_column] = DEAD;
                }

                /* X.D.1.d. Apply Rule 2 of Conway's Game of Life */
                if(our_current_grid[our_current_row][my_current_column] == ALIVE
                        && (my_number_of_alive_neighbors == 2
                            || my_number_of_alive_neighbors == 3)) {
                    our_next_grid[our_current_row][my_current_column] = ALIVE;
                }

                /* X.D.1.e. Apply Rule 3 of Conway's Game of Life */
                if(my_number_of_alive_neighbors > 3) {
                    our_next_grid[our_current_row][my_current_column] = DEAD;
                }

                /* X.D.1.f. Apply Rule 4 of Conway's Game of Life */
                if(our_current_grid[our_current_row][my_current_column] == DEAD
                        && my_number_of_alive_neighbors == 3) {
                    our_next_grid[our_current_row][my_current_column] = ALIVE;
                }
            }
        }

        /* X.E. Spawn threads to copy the next grid into the current grid */
        for(our_current_row = 1; our_current_row <= OUR_NUMBER_OF_ROWS;
                our_current_row++) {
#ifdef OpenMP
#pragma omp parallel for private(my_current_column)
#endif
            for(my_current_column = 1;
                    my_current_column <= NUMBER_OF_COLUMNS;
                    my_current_column++) {
                our_current_grid[our_current_row][my_current_column] =
                    our_next_grid[our_current_row][my_current_column];
            }
        }
    }

    /* XI. Deallocate data structures */
    for(our_current_row = OUR_NUMBER_OF_ROWS + 1; our_current_row >= 0;
            our_current_row--) {
        free(our_next_grid[our_current_row]);
        free(our_current_grid[our_current_row]);
    }
    free(our_next_grid);
    free(our_current_grid);

    /* XII. Finalize the distributed memory environment */
#ifdef MPI
    exit_if((MPI_Finalize() != MPI_SUCCESS), "MPI_Finalize", OUR_RANK);
#endif

    return 0;
}
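A compact restatement (illustrative only, not part of the program) of the per-cell update that step X.D applies, with the default case made explicit:

// Next state of one cell from its current state and ALIVE-neighbor count.
static int next_state(int alive, int alive_neighbors) {
  if (alive_neighbors < 2) return 0;                                    // Rule 1: underpopulation
  if (alive && (alive_neighbors == 2 || alive_neighbors == 3)) return 1; // Rule 2: survival
  if (alive_neighbors > 3) return 0;                                    // Rule 3: overpopulation
  if (!alive && alive_neighbors == 3) return 1;                         // Rule 4: reproduction
  return alive;  // otherwise unchanged (a dead cell with 2 neighbors stays dead)
}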
node_relax.c
/*BHEADER********************************************************************** * Copyright (c) 2008, Lawrence Livermore National Security, LLC. * Produced at the Lawrence Livermore National Laboratory. * This file is part of HYPRE. See file COPYRIGHT for details. * * HYPRE is free software; you can redistribute it and/or modify it under the * terms of the GNU Lesser General Public License (as published by the Free * Software Foundation) version 2.1 dated February 1999. * * $Revision$ ***********************************************************************EHEADER*/ #include "_hypre_sstruct_ls.h" #include "_hypre_parcsr_ls.h" /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ typedef struct { MPI_Comm comm; HYPRE_Real tol; /* not yet used */ HYPRE_Int max_iter; HYPRE_Int rel_change; /* not yet used */ HYPRE_Int zero_guess; HYPRE_Real weight; HYPRE_Int num_nodesets; HYPRE_Int *nodeset_sizes; HYPRE_Int *nodeset_ranks; hypre_Index *nodeset_strides; hypre_Index **nodeset_indices; hypre_SStructPMatrix *A; hypre_SStructPVector *b; hypre_SStructPVector *x; hypre_SStructPVector *t; HYPRE_Int **diag_rank; /* defines sends and recieves for each struct_vector */ hypre_ComputePkg ***svec_compute_pkgs; hypre_CommHandle **comm_handle; /* defines independent and dependent boxes for computations */ hypre_ComputePkg **compute_pkgs; /* pointers to local storage used to invert diagonal blocks */ HYPRE_Real **A_loc; HYPRE_Real *x_loc; /* pointers for vector and matrix data */ HYPRE_Real ***Ap; HYPRE_Real **bp; HYPRE_Real **xp; HYPRE_Real **tp; /* log info (always logged) */ HYPRE_Int num_iterations; HYPRE_Int time_index; HYPRE_Int flops; } hypre_NodeRelaxData; /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ void * hypre_NodeRelaxCreate( MPI_Comm comm ) { hypre_NodeRelaxData *relax_data; hypre_Index stride; hypre_Index indices[1]; relax_data = hypre_CTAlloc(hypre_NodeRelaxData, 1); (relax_data -> comm) = comm; (relax_data -> time_index) = hypre_InitializeTiming("NodeRelax"); /* set defaults */ (relax_data -> tol) = 1.0e-06; (relax_data -> max_iter) = 1000; (relax_data -> rel_change) = 0; (relax_data -> zero_guess) = 0; (relax_data -> weight) = 1.0; (relax_data -> num_nodesets) = 0; (relax_data -> nodeset_sizes) = NULL; (relax_data -> nodeset_ranks) = NULL; (relax_data -> nodeset_strides) = NULL; (relax_data -> nodeset_indices) = NULL; (relax_data -> diag_rank) = NULL; (relax_data -> t) = NULL; (relax_data -> A_loc) = NULL; (relax_data -> x_loc) = NULL; (relax_data -> Ap) = NULL; (relax_data -> bp) = NULL; (relax_data -> xp) = NULL; (relax_data -> tp) = NULL; (relax_data -> comm_handle) = NULL; (relax_data -> svec_compute_pkgs)= NULL; (relax_data -> compute_pkgs) = NULL; hypre_SetIndex3(stride, 1, 1, 1); hypre_SetIndex3(indices[0], 0, 0, 0); hypre_NodeRelaxSetNumNodesets((void *) relax_data, 1); hypre_NodeRelaxSetNodeset((void *) relax_data, 0, 1, stride, indices); return (void *) relax_data; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int hypre_NodeRelaxDestroy( void *relax_vdata ) { hypre_NodeRelaxData *relax_data = (hypre_NodeRelaxData *)relax_vdata; HYPRE_Int i,vi; HYPRE_Int nvars; if (relax_data) { nvars = hypre_SStructPMatrixNVars(relax_data -> A); for (i = 0; i < (relax_data -> 
num_nodesets); i++) { hypre_TFree(relax_data -> nodeset_indices[i]); for (vi = 0; vi < nvars; vi++) { hypre_ComputePkgDestroy(relax_data -> svec_compute_pkgs[i][vi]); } hypre_TFree(relax_data -> svec_compute_pkgs[i]); hypre_ComputePkgDestroy(relax_data -> compute_pkgs[i]); } hypre_TFree(relax_data -> nodeset_sizes); hypre_TFree(relax_data -> nodeset_ranks); hypre_TFree(relax_data -> nodeset_strides); hypre_TFree(relax_data -> nodeset_indices); hypre_SStructPMatrixDestroy(relax_data -> A); hypre_SStructPVectorDestroy(relax_data -> b); hypre_SStructPVectorDestroy(relax_data -> x); hypre_TFree(relax_data -> svec_compute_pkgs); hypre_TFree(relax_data -> comm_handle); hypre_TFree(relax_data -> compute_pkgs); hypre_SStructPVectorDestroy(relax_data -> t); hypre_TFree(relax_data -> x_loc); hypre_TFree((relax_data ->A_loc)[0]); hypre_TFree(relax_data -> A_loc); hypre_TFree(relax_data -> bp); hypre_TFree(relax_data -> xp); hypre_TFree(relax_data -> tp); for (vi = 0; vi < nvars; vi++) { hypre_TFree((relax_data -> Ap)[vi]); hypre_TFree((relax_data -> diag_rank)[vi]); } hypre_TFree(relax_data -> Ap); hypre_TFree(relax_data -> diag_rank); hypre_FinalizeTiming(relax_data -> time_index); hypre_TFree(relax_data); } return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int hypre_NodeRelaxSetup( void *relax_vdata, hypre_SStructPMatrix *A, hypre_SStructPVector *b, hypre_SStructPVector *x ) { hypre_NodeRelaxData *relax_data = (hypre_NodeRelaxData *)relax_vdata; HYPRE_Int num_nodesets = (relax_data -> num_nodesets); HYPRE_Int *nodeset_sizes = (relax_data -> nodeset_sizes); hypre_Index *nodeset_strides = (relax_data -> nodeset_strides); hypre_Index **nodeset_indices = (relax_data -> nodeset_indices); HYPRE_Int ndim = hypre_SStructPMatrixNDim(A); hypre_SStructPVector *t; HYPRE_Int **diag_rank; HYPRE_Real **A_loc; HYPRE_Real *x_loc; HYPRE_Real ***Ap; HYPRE_Real **bp; HYPRE_Real **xp; HYPRE_Real **tp; hypre_ComputeInfo *compute_info; hypre_ComputePkg **compute_pkgs; hypre_ComputePkg ***svec_compute_pkgs; hypre_CommHandle **comm_handle; hypre_Index diag_index; hypre_IndexRef stride; hypre_IndexRef index; hypre_StructGrid *sgrid; hypre_StructStencil *sstencil; hypre_Index *sstencil_shape; HYPRE_Int sstencil_size; hypre_StructStencil *sstencil_union; hypre_Index *sstencil_union_shape; HYPRE_Int sstencil_union_count; hypre_BoxArrayArray *orig_indt_boxes; hypre_BoxArrayArray *orig_dept_boxes; hypre_BoxArrayArray *box_aa; hypre_BoxArray *box_a; hypre_Box *box; HYPRE_Int box_aa_size; HYPRE_Int box_a_size; hypre_BoxArrayArray *new_box_aa; hypre_BoxArray *new_box_a; hypre_Box *new_box; HYPRE_Real scale; HYPRE_Int frac; HYPRE_Int i, j, k, p, m, s, compute_i; HYPRE_Int vi, vj; HYPRE_Int nvars; HYPRE_Int dim; /*---------------------------------------------------------- * Set up the temp vector *----------------------------------------------------------*/ if ((relax_data -> t) == NULL) { hypre_SStructPVectorCreate(hypre_SStructPVectorComm(b), hypre_SStructPVectorPGrid(b), &t); hypre_SStructPVectorInitialize(t); hypre_SStructPVectorAssemble(t); (relax_data -> t) = t; } /*---------------------------------------------------------- * Find the matrix diagonals, use diag_rank[vi][vj] = -1 to * mark that the coresponding StructMatrix is NULL. 
*----------------------------------------------------------*/ nvars = hypre_SStructPMatrixNVars(A); diag_rank = hypre_CTAlloc(HYPRE_Int *, nvars); for (vi = 0; vi < nvars; vi++) { diag_rank[vi] = hypre_CTAlloc(HYPRE_Int, nvars); for (vj = 0; vj < nvars; vj++) { if (hypre_SStructPMatrixSMatrix(A, vi, vj) != NULL) { sstencil = hypre_SStructPMatrixSStencil(A, vi, vj); hypre_SetIndex3(diag_index, 0, 0, 0); diag_rank[vi][vj] = hypre_StructStencilElementRank(sstencil, diag_index); } else { diag_rank[vi][vj] = -1; } } } /*---------------------------------------------------------- * Allocate storage used to invert local diagonal blocks *----------------------------------------------------------*/ x_loc = hypre_TAlloc(HYPRE_Real , hypre_NumThreads()*nvars); A_loc = hypre_TAlloc(HYPRE_Real *, hypre_NumThreads()*nvars); A_loc[0] = hypre_TAlloc(HYPRE_Real , hypre_NumThreads()*nvars*nvars); for (vi = 1; vi < hypre_NumThreads()*nvars; vi++) { A_loc[vi] = A_loc[0] + vi*nvars; } /* Allocate pointers for vector and matrix */ bp = hypre_TAlloc(HYPRE_Real *, nvars); xp = hypre_TAlloc(HYPRE_Real *, nvars); tp = hypre_TAlloc(HYPRE_Real *, nvars); Ap = hypre_TAlloc(HYPRE_Real **, nvars); for (vi = 0; vi < nvars; vi++) { Ap[vi] = hypre_TAlloc(HYPRE_Real *, nvars); } /*---------------------------------------------------------- * Set up the compute packages for each nodeset *----------------------------------------------------------*/ sgrid = hypre_StructMatrixGrid(hypre_SStructPMatrixSMatrix(A, 0, 0)); dim = hypre_StructStencilNDim( hypre_SStructPMatrixSStencil(A, 0, 0)); compute_pkgs = hypre_CTAlloc(hypre_ComputePkg *, num_nodesets); svec_compute_pkgs = hypre_CTAlloc(hypre_ComputePkg **, num_nodesets); comm_handle = hypre_CTAlloc(hypre_CommHandle *, nvars); for (p = 0; p < num_nodesets; p++) { /*---------------------------------------------------------- * Set up the compute packages to define sends and recieves * for each struct_vector (svec_compute_pkgs) and the compute * package to define independent and dependent computations * (compute_pkgs). *----------------------------------------------------------*/ svec_compute_pkgs[p] = hypre_CTAlloc(hypre_ComputePkg *, nvars); for (vi = -1; vi < nvars; vi++) { /*---------------------------------------------------------- * The first execution (vi=-1) sets up the stencil to * define independent and dependent computations. The * stencil is the "union" over i,j of all stencils for * for struct_matrix A_ij. * * Other executions (vi > -1) set up the stencil to * define sends and recieves for the struct_vector vi. * The stencil for vector i is the "union" over j of all * stencils for struct_matrix A_ji. 
*----------------------------------------------------------*/ sstencil_union_count = 0; if (vi == -1) { for (i = 0; i < nvars; i++) { for (vj = 0; vj < nvars; vj++) { if (hypre_SStructPMatrixSMatrix(A,vj,i) != NULL) { sstencil = hypre_SStructPMatrixSStencil(A, vj, i); sstencil_union_count += hypre_StructStencilSize(sstencil); } } } } else { for (vj = 0; vj < nvars; vj++) { if (hypre_SStructPMatrixSMatrix(A,vj,vi) != NULL) { sstencil = hypre_SStructPMatrixSStencil(A, vj, vi); sstencil_union_count += hypre_StructStencilSize(sstencil); } } } sstencil_union_shape = hypre_CTAlloc(hypre_Index, sstencil_union_count); sstencil_union_count = 0; if (vi == -1) { for (i = 0; i < nvars; i++) { for (vj = 0; vj < nvars; vj++) { if (hypre_SStructPMatrixSMatrix(A,vj,i) != NULL) { sstencil = hypre_SStructPMatrixSStencil(A, vj, i); sstencil_size = hypre_StructStencilSize(sstencil); sstencil_shape = hypre_StructStencilShape(sstencil); for (s = 0; s < sstencil_size; s++) { hypre_CopyIndex(sstencil_shape[s], sstencil_union_shape[sstencil_union_count]); sstencil_union_count++; } } } } } else { for (vj = 0; vj < nvars; vj++) { if (hypre_SStructPMatrixSMatrix(A,vj,vi) != NULL) { sstencil = hypre_SStructPMatrixSStencil(A, vj, vi); sstencil_size = hypre_StructStencilSize(sstencil); sstencil_shape = hypre_StructStencilShape(sstencil); for (s = 0; s < sstencil_size; s++) { hypre_CopyIndex(sstencil_shape[s], sstencil_union_shape[sstencil_union_count]); sstencil_union_count++; } } } } sstencil_union = hypre_StructStencilCreate(dim, sstencil_union_count, sstencil_union_shape); hypre_CreateComputeInfo(sgrid, sstencil_union, &compute_info); orig_indt_boxes = hypre_ComputeInfoIndtBoxes(compute_info); orig_dept_boxes = hypre_ComputeInfoDeptBoxes(compute_info); stride = nodeset_strides[p]; for (compute_i = 0; compute_i < 2; compute_i++) { switch(compute_i) { case 0: box_aa = orig_indt_boxes; break; case 1: box_aa = orig_dept_boxes; break; } box_aa_size = hypre_BoxArrayArraySize(box_aa); new_box_aa = hypre_BoxArrayArrayCreate(box_aa_size, ndim); for (i = 0; i < box_aa_size; i++) { box_a = hypre_BoxArrayArrayBoxArray(box_aa, i); box_a_size = hypre_BoxArraySize(box_a); new_box_a = hypre_BoxArrayArrayBoxArray(new_box_aa, i); hypre_BoxArraySetSize(new_box_a, box_a_size * nodeset_sizes[p]); k = 0; for (m = 0; m < nodeset_sizes[p]; m++) { index = nodeset_indices[p][m]; for (j = 0; j < box_a_size; j++) { box = hypre_BoxArrayBox(box_a, j); new_box = hypre_BoxArrayBox(new_box_a, k); hypre_CopyBox(box, new_box); hypre_ProjectBox(new_box, index, stride); k++; } } } switch(compute_i) { case 0: hypre_ComputeInfoIndtBoxes(compute_info) = new_box_aa; break; case 1: hypre_ComputeInfoDeptBoxes(compute_info) = new_box_aa; break; } } hypre_CopyIndex(stride, hypre_ComputeInfoStride(compute_info)); if (vi == -1) { hypre_ComputePkgCreate(compute_info, hypre_StructVectorDataSpace( hypre_SStructPVectorSVector(x, 0)), 1, sgrid, &compute_pkgs[p]); } else { hypre_ComputePkgCreate(compute_info, hypre_StructVectorDataSpace( hypre_SStructPVectorSVector(x, vi)), 1, sgrid, &svec_compute_pkgs[p][vi]); } hypre_BoxArrayArrayDestroy(orig_indt_boxes); hypre_BoxArrayArrayDestroy(orig_dept_boxes); hypre_StructStencilDestroy(sstencil_union); } } /*---------------------------------------------------------- * Set up the relax data structure *----------------------------------------------------------*/ hypre_SStructPMatrixRef(A, &(relax_data -> A)); hypre_SStructPVectorRef(x, &(relax_data -> x)); hypre_SStructPVectorRef(b, &(relax_data -> b)); (relax_data -> diag_rank) 
= diag_rank; (relax_data -> A_loc) = A_loc; (relax_data -> x_loc) = x_loc; (relax_data -> Ap) = Ap; (relax_data -> bp) = bp; (relax_data -> tp) = tp; (relax_data -> xp) = xp; (relax_data -> compute_pkgs) = compute_pkgs; (relax_data -> svec_compute_pkgs) = svec_compute_pkgs; (relax_data -> comm_handle) = comm_handle; /*----------------------------------------------------- * Compute flops *-----------------------------------------------------*/ scale = 0.0; for (p = 0; p < num_nodesets; p++) { stride = nodeset_strides[p]; frac = hypre_IndexX(stride); frac *= hypre_IndexY(stride); frac *= hypre_IndexZ(stride); scale += (nodeset_sizes[p] / frac); } /* REALLY Rough Estimate = num_nodes * nvar^3 */ (relax_data -> flops) = scale * nvars * nvars * nvars * hypre_StructVectorGlobalSize( hypre_SStructPVectorSVector(x,0)); return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int hypre_NodeRelax( void *relax_vdata, hypre_SStructPMatrix *A, hypre_SStructPVector *b, hypre_SStructPVector *x ) { hypre_NodeRelaxData *relax_data = (hypre_NodeRelaxData *)relax_vdata; HYPRE_Int max_iter = (relax_data -> max_iter); HYPRE_Int zero_guess = (relax_data -> zero_guess); HYPRE_Real weight = (relax_data -> weight); HYPRE_Int num_nodesets = (relax_data -> num_nodesets); HYPRE_Int *nodeset_ranks = (relax_data -> nodeset_ranks); hypre_Index *nodeset_strides = (relax_data -> nodeset_strides); hypre_SStructPVector *t = (relax_data -> t); HYPRE_Int **diag_rank = (relax_data -> diag_rank); hypre_ComputePkg **compute_pkgs = (relax_data -> compute_pkgs); hypre_ComputePkg ***svec_compute_pkgs= (relax_data -> svec_compute_pkgs); hypre_CommHandle **comm_handle = (relax_data -> comm_handle); hypre_ComputePkg *compute_pkg; hypre_ComputePkg *svec_compute_pkg; hypre_BoxArrayArray *compute_box_aa; hypre_BoxArray *compute_box_a; hypre_Box *compute_box; hypre_Box *A_data_box; hypre_Box *b_data_box; hypre_Box *x_data_box; hypre_Box *t_data_box; HYPRE_Int Ai; HYPRE_Int bi; HYPRE_Int xi; HYPRE_Int ti; HYPRE_Real **tA_loc = (relax_data -> A_loc); HYPRE_Real *tx_loc = (relax_data -> x_loc); HYPRE_Real **A_loc; HYPRE_Real *x_loc; HYPRE_Real ***Ap = (relax_data -> Ap); HYPRE_Real **bp = (relax_data -> bp); HYPRE_Real **xp = (relax_data -> xp); HYPRE_Real **tp = (relax_data -> tp); hypre_StructMatrix *A_block; hypre_StructVector *x_block; hypre_IndexRef stride; hypre_IndexRef start; hypre_Index loop_size; hypre_StructStencil *stencil; hypre_Index *stencil_shape; HYPRE_Int stencil_size; HYPRE_Int iter, p, compute_i, i, j, si; HYPRE_Int nodeset; HYPRE_Int nvars, ndim; HYPRE_Int vi, vj; /*---------------------------------------------------------- * Initialize some things and deal with special cases *----------------------------------------------------------*/ hypre_BeginTiming(relax_data -> time_index); hypre_SStructPMatrixDestroy(relax_data -> A); hypre_SStructPVectorDestroy(relax_data -> b); hypre_SStructPVectorDestroy(relax_data -> x); hypre_SStructPMatrixRef(A, &(relax_data -> A)); hypre_SStructPVectorRef(x, &(relax_data -> x)); hypre_SStructPVectorRef(b, &(relax_data -> b)); (relax_data -> num_iterations) = 0; /* if max_iter is zero, return */ if (max_iter == 0) { /* if using a zero initial guess, return zero */ if (zero_guess) { hypre_SStructPVectorSetConstantValues(x, 0.0); } hypre_EndTiming(relax_data -> time_index); return hypre_error_flag; } /*---------------------------------------------------------- * 
Do zero_guess iteration *----------------------------------------------------------*/ p = 0; iter = 0; nvars = hypre_SStructPMatrixNVars(relax_data -> A); ndim = hypre_SStructPMatrixNDim(relax_data -> A); if (zero_guess) { if (num_nodesets > 1) { hypre_SStructPVectorSetConstantValues(x, 0.0); } nodeset = nodeset_ranks[p]; compute_pkg = compute_pkgs[nodeset]; stride = nodeset_strides[nodeset]; for (compute_i = 0; compute_i < 2; compute_i++) { switch(compute_i) { case 0: { compute_box_aa = hypre_ComputePkgIndtBoxes(compute_pkg); } break; case 1: { compute_box_aa = hypre_ComputePkgDeptBoxes(compute_pkg); } break; } hypre_ForBoxArrayI(i, compute_box_aa) { compute_box_a = hypre_BoxArrayArrayBoxArray(compute_box_aa, i); A_data_box = hypre_BoxArrayBox(hypre_StructMatrixDataSpace( hypre_SStructPMatrixSMatrix(A,0,0)), i); b_data_box = hypre_BoxArrayBox(hypre_StructVectorDataSpace( hypre_SStructPVectorSVector(b,0)), i); x_data_box = hypre_BoxArrayBox(hypre_StructVectorDataSpace( hypre_SStructPVectorSVector(x,0)), i); for (vi = 0; vi < nvars; vi++) { for (vj = 0; vj < nvars; vj++) { if (hypre_SStructPMatrixSMatrix(A,vi,vj) != NULL) { Ap[vi][vj] = hypre_StructMatrixBoxData( hypre_SStructPMatrixSMatrix(A,vi,vj), i, diag_rank[vi][vj]); } } bp[vi] = hypre_StructVectorBoxData( hypre_SStructPVectorSVector(b,vi), i); xp[vi] = hypre_StructVectorBoxData( hypre_SStructPVectorSVector(x,vi), i); } hypre_ForBoxI(j, compute_box_a) { compute_box = hypre_BoxArrayBox(compute_box_a, j); start = hypre_BoxIMin(compute_box); hypre_BoxGetStrideSize(compute_box, stride, loop_size); hypre_BoxLoop3Begin(ndim, loop_size, A_data_box, start, stride, Ai, b_data_box, start, stride, bi, x_data_box, start, stride, xi); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(HYPRE_BOX_PRIVATE,Ai,bi,xi,vi,vj,x_loc,A_loc) HYPRE_SMP_SCHEDULE #endif hypre_BoxLoop3For(Ai, bi, xi) { A_loc = &tA_loc[hypre_BoxLoopBlock()*nvars]; x_loc = &tx_loc[hypre_BoxLoopBlock()*nvars]; /*------------------------------------------------ * Copy rhs and matrix for diagonal coupling * (intra-nodal) into local storage. *----------------------------------------------*/ for (vi = 0; vi < nvars; vi++) { x_loc[vi] = bp[vi][bi]; for (vj = 0; vj < nvars; vj++) { if (hypre_SStructPMatrixSMatrix(A,vi,vj) != NULL) { A_loc[vi][vj] = Ap[vi][vj][Ai]; } else { A_loc[vi][vj] = 0.0; } } } /*------------------------------------------------ * Invert intra-nodal coupling *----------------------------------------------*/ gselim(A_loc[0], x_loc, nvars); /*------------------------------------------------ * Copy solution from local storage. 
*----------------------------------------------*/ for (vi = 0; vi < nvars; vi++) { xp[vi][xi] = x_loc[vi]; } } hypre_BoxLoop3End(Ai, bi, xi); } } } if (weight != 1.0) { hypre_SStructPScale(weight, x); } p = (p + 1) % num_nodesets; iter = iter + (p == 0); } /*---------------------------------------------------------- * Do regular iterations *----------------------------------------------------------*/ while (iter < max_iter) { nodeset = nodeset_ranks[p]; compute_pkg = compute_pkgs[nodeset]; stride = nodeset_strides[nodeset]; hypre_SStructPCopy(x, t); for (compute_i = 0; compute_i < 2; compute_i++) { switch(compute_i) { case 0: { for (vi = 0; vi < nvars; vi++) { x_block = hypre_SStructPVectorSVector(x,vi); xp[vi] = hypre_StructVectorData(x_block); svec_compute_pkg = svec_compute_pkgs[nodeset][vi]; hypre_InitializeIndtComputations(svec_compute_pkg, xp[vi], &comm_handle[vi]); } compute_box_aa = hypre_ComputePkgIndtBoxes(compute_pkg); } break; case 1: { for (vi = 0; vi < nvars; vi++) { hypre_FinalizeIndtComputations(comm_handle[vi]); } compute_box_aa = hypre_ComputePkgDeptBoxes(compute_pkg); } break; } hypre_ForBoxArrayI(i, compute_box_aa) { compute_box_a = hypre_BoxArrayArrayBoxArray(compute_box_aa, i); A_data_box = hypre_BoxArrayBox(hypre_StructMatrixDataSpace( hypre_SStructPMatrixSMatrix(A,0,0)), i); b_data_box = hypre_BoxArrayBox(hypre_StructVectorDataSpace( hypre_SStructPVectorSVector(b,0)), i); x_data_box = hypre_BoxArrayBox(hypre_StructVectorDataSpace( hypre_SStructPVectorSVector(x,0)), i); t_data_box = hypre_BoxArrayBox(hypre_StructVectorDataSpace( hypre_SStructPVectorSVector(t,0)), i); for (vi = 0; vi < nvars; vi++) { bp[vi] = hypre_StructVectorBoxData( hypre_SStructPVectorSVector(b,vi), i); tp[vi] = hypre_StructVectorBoxData( hypre_SStructPVectorSVector(t,vi), i); } hypre_ForBoxI(j, compute_box_a) { compute_box = hypre_BoxArrayBox(compute_box_a, j); start = hypre_BoxIMin(compute_box); hypre_BoxGetStrideSize(compute_box, stride, loop_size); hypre_BoxLoop2Begin(ndim, loop_size, b_data_box, start, stride, bi, t_data_box, start, stride, ti); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(HYPRE_BOX_PRIVATE,bi,ti,vi) HYPRE_SMP_SCHEDULE #endif hypre_BoxLoop2For(bi, ti) { /* Copy rhs into temp vector */ for (vi = 0; vi < nvars; vi++) { tp[vi][ti] = bp[vi][bi]; } } hypre_BoxLoop2End(bi, ti); for (vi = 0; vi < nvars; vi++) { for (vj = 0; vj < nvars; vj++) { if (hypre_SStructPMatrixSMatrix(A,vi,vj) != NULL) { A_block = hypre_SStructPMatrixSMatrix(A,vi,vj); x_block = hypre_SStructPVectorSVector(x,vj); stencil = hypre_StructMatrixStencil(A_block); stencil_shape = hypre_StructStencilShape(stencil); stencil_size = hypre_StructStencilSize(stencil); for (si = 0; si < stencil_size; si++) { if (si != diag_rank[vi][vj]) { Ap[vi][vj] = hypre_StructMatrixBoxData( A_block,i,si); xp[vj] = hypre_StructVectorBoxData(x_block,i) + hypre_BoxOffsetDistance( x_data_box, stencil_shape[si]); hypre_BoxLoop3Begin(ndim, loop_size, A_data_box, start, stride, Ai, x_data_box, start, stride, xi, t_data_box, start, stride, ti); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(HYPRE_BOX_PRIVATE,Ai,xi,ti) HYPRE_SMP_SCHEDULE #endif hypre_BoxLoop3For(Ai,xi,ti) { tp[vi][ti] -= Ap[vi][vj][Ai] * xp[vj][xi]; } hypre_BoxLoop3End(Ai, xi, ti); } } } } } for (vi = 0; vi < nvars; vi++) { for (vj = 0; vj < nvars; vj++) { if (hypre_SStructPMatrixSMatrix(A,vi,vj) != NULL) { Ap[vi][vj] = hypre_StructMatrixBoxData( hypre_SStructPMatrixSMatrix(A,vi,vj), i, diag_rank[vi][vj]); } } } hypre_BoxLoop2Begin(ndim, loop_size, 
A_data_box, start, stride, Ai, t_data_box, start, stride, ti); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(HYPRE_BOX_PRIVATE,Ai,ti,vi,vj,x_loc,A_loc) HYPRE_SMP_SCHEDULE #endif hypre_BoxLoop2For(Ai, ti) { A_loc = &tA_loc[hypre_BoxLoopBlock()*nvars]; x_loc = &tx_loc[hypre_BoxLoopBlock()*nvars]; /*------------------------------------------------ * Copy rhs and matrix for diagonal coupling * (intra-nodal) into local storage. *----------------------------------------------*/ for (vi = 0; vi < nvars; vi++) { x_loc[vi] = tp[vi][ti]; for (vj = 0; vj < nvars; vj++) { if (hypre_SStructPMatrixSMatrix(A,vi,vj) != NULL) { A_loc[vi][vj] = Ap[vi][vj][Ai]; } else { A_loc[vi][vj] = 0.0; } } } /*------------------------------------------------ * Invert intra-nodal coupling *----------------------------------------------*/ gselim(A_loc[0], x_loc, nvars); /*------------------------------------------------ * Copy solution from local storage. *----------------------------------------------*/ for (vi = 0; vi < nvars; vi++) { tp[vi][ti] = x_loc[vi]; } } hypre_BoxLoop2End(Ai, ti); } } } if (weight != 1.0) { hypre_SStructPScale((1.0 - weight), x); hypre_SStructPAxpy(weight, t, x); } else { hypre_SStructPCopy(t, x); } p = (p + 1) % num_nodesets; iter = iter + (p == 0); } (relax_data -> num_iterations) = iter; /*----------------------------------------------------------------------- * Return *-----------------------------------------------------------------------*/ hypre_IncFLOPCount(relax_data -> flops); hypre_EndTiming(relax_data -> time_index); return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int hypre_NodeRelaxSetTol( void *relax_vdata, HYPRE_Real tol ) { hypre_NodeRelaxData *relax_data = (hypre_NodeRelaxData *)relax_vdata; (relax_data -> tol) = tol; return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int hypre_NodeRelaxSetMaxIter( void *relax_vdata, HYPRE_Int max_iter ) { hypre_NodeRelaxData *relax_data = (hypre_NodeRelaxData *)relax_vdata; (relax_data -> max_iter) = max_iter; return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int hypre_NodeRelaxSetZeroGuess( void *relax_vdata, HYPRE_Int zero_guess ) { hypre_NodeRelaxData *relax_data = (hypre_NodeRelaxData *)relax_vdata; (relax_data -> zero_guess) = zero_guess; return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int hypre_NodeRelaxSetWeight( void *relax_vdata, HYPRE_Real weight ) { hypre_NodeRelaxData *relax_data = (hypre_NodeRelaxData *)relax_vdata; (relax_data -> weight) = weight; return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int hypre_NodeRelaxSetNumNodesets( void *relax_vdata, HYPRE_Int num_nodesets ) { hypre_NodeRelaxData *relax_data = (hypre_NodeRelaxData *)relax_vdata; HYPRE_Int i; /* free up old nodeset memory */ for (i = 0; i < (relax_data -> num_nodesets); i++) { hypre_TFree(relax_data -> nodeset_indices[i]); } hypre_TFree(relax_data -> nodeset_sizes); 
hypre_TFree(relax_data -> nodeset_ranks); hypre_TFree(relax_data -> nodeset_strides); hypre_TFree(relax_data -> nodeset_indices); /* alloc new nodeset memory */ (relax_data -> num_nodesets) = num_nodesets; (relax_data -> nodeset_sizes) = hypre_TAlloc(HYPRE_Int, num_nodesets); (relax_data -> nodeset_ranks) = hypre_TAlloc(HYPRE_Int, num_nodesets); (relax_data -> nodeset_strides) = hypre_TAlloc(hypre_Index, num_nodesets); (relax_data -> nodeset_indices) = hypre_TAlloc(hypre_Index *, num_nodesets); for (i = 0; i < num_nodesets; i++) { (relax_data -> nodeset_sizes[i]) = 0; (relax_data -> nodeset_ranks[i]) = i; (relax_data -> nodeset_indices[i]) = NULL; } return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int hypre_NodeRelaxSetNodeset( void *relax_vdata, HYPRE_Int nodeset, HYPRE_Int nodeset_size, hypre_Index nodeset_stride, hypre_Index *nodeset_indices ) { hypre_NodeRelaxData *relax_data = (hypre_NodeRelaxData *)relax_vdata; HYPRE_Int i; /* free up old nodeset memory */ hypre_TFree(relax_data -> nodeset_indices[nodeset]); /* alloc new nodeset memory */ (relax_data -> nodeset_indices[nodeset]) = hypre_TAlloc(hypre_Index, nodeset_size); (relax_data -> nodeset_sizes[nodeset]) = nodeset_size; hypre_CopyIndex(nodeset_stride, (relax_data -> nodeset_strides[nodeset])); for (i = 0; i < nodeset_size; i++) { hypre_CopyIndex(nodeset_indices[i], (relax_data -> nodeset_indices[nodeset][i])); } return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int hypre_NodeRelaxSetNodesetRank( void *relax_vdata, HYPRE_Int nodeset, HYPRE_Int nodeset_rank ) { hypre_NodeRelaxData *relax_data = (hypre_NodeRelaxData *)relax_vdata; (relax_data -> nodeset_ranks[nodeset]) = nodeset_rank; return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int hypre_NodeRelaxSetTempVec( void *relax_vdata, hypre_SStructPVector *t ) { hypre_NodeRelaxData *relax_data = (hypre_NodeRelaxData *)relax_vdata; hypre_SStructPVectorDestroy(relax_data -> t); hypre_SStructPVectorRef(t, &(relax_data -> t)); return hypre_error_flag; }
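/* Hedged illustration (not from the hypre source): the nodal solves above
 * call gselim(A_loc[0], x_loc, nvars), which is defined elsewhere in hypre.
 * The sketch below shows the kind of routine that name suggests: in-place
 * Gaussian elimination on a flattened, row-major nvars-by-nvars block, with
 * the right-hand side in x on entry and the solution in x on exit. The
 * function name and the no-pivoting assumption are illustrative only. */
static void gselim_sketch(double *A, double *x, int n)
{
   int i, j, k;
   double factor;

   /* Forward elimination (assumes nonzero pivots, i.e. no row pivoting). */
   for (k = 0; k < n - 1; k++)
   {
      for (i = k + 1; i < n; i++)
      {
         factor = A[i*n + k] / A[k*n + k];
         for (j = k + 1; j < n; j++)
         {
            A[i*n + j] -= factor * A[k*n + j];
         }
         x[i] -= factor * x[k];
      }
   }
   /* Back substitution. */
   for (i = n - 1; i >= 0; i--)
   {
      for (j = i + 1; j < n; j++)
      {
         x[i] -= A[i*n + j] * x[j];
      }
      x[i] /= A[i*n + i];
   }
}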
sum.c
#include <omp.h>

#define N        100000000
#define NTHREADS 8

int values[N];

int main(int argc, char *argv[])
{
    int tid;
    static int sum[NTHREADS];

#ifdef _OPENMP
    omp_set_num_threads(NTHREADS);
    /* With OpenMP, the brace below opens the parallel region; in the serial
     * build, the matching brace instead opens the loop over thread ids. */
    #pragma omp parallel private(tid)
    {
        tid = omp_get_thread_num();
#else
    for (tid = 0; tid < NTHREADS; tid++)
    {
#endif
        /* Each thread accumulates its own shifted sum into sum[tid]. */
        for (int i = 0; i < N; i++)
            sum[tid] += values[i] >> tid;
    }
    return 0;
}
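/* Hedged follow-up sketch (not part of the original example): sum[NTHREADS]
 * above packs every thread's accumulator into one small shared array, so the
 * entries share cache lines and the threads invalidate each other's caches
 * on every update (false sharing). When a single combined total is wanted,
 * an OpenMP reduction keeps each partial sum thread-private and combines
 * the copies once at the end. */
#include <stdio.h>
#include <omp.h>

#define N 100000000

static int values[N];

int main(void)
{
    long total = 0;

    /* Each thread accumulates into a private copy of 'total';
     * OpenMP sums the copies when the loop ends. */
    #pragma omp parallel for reduction(+:total)
    for (int i = 0; i < N; i++)
        total += values[i];

    printf("total = %ld\n", total);
    return 0;
}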
pastix.c
/* CalculiX - A 3-dimensional finite element program */ /*Copyright (C) 1998-2020 Guido Dhondt*/ /* This program is free software; you can redistribute it and/or */ /* modify it under the terms of the GNU General Public License as*/ /* published by the Free Software Foundation(version 2);*/ /**/ /* This program is distributed in the hope that it will be useful, */ /* but WITHOUT ANY WARRANTY; without even the implied warranty of*/ /* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the*/ /* GNU General Public License for more details.*/ /* You should have received a copy of the GNU General Public License */ /* along with this program; if not, write to the Free Software */ /* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #ifdef PASTIX #include <pastix.h> #include <spm.h> #include <sys/time.h> #include <stdio.h> #include <math.h> #include <stdlib.h> #include "CalculiX.h" #include "pastix.h" // Time structs for benchmarking extern struct timespec totalCalculixTimeStart, totalCalculixTimeEnd; double totalPastixTime; // Variables for storing reuse history int totalIterations = 0; int totalReused = 0; // Current sparse matrix in STI format double* auPtr = NULL; double* adPtr = NULL; ITG *icolTotal = NULL, *irowTotal = NULL; // Matrix data from previous iteration ITG neqPrev=0, nzsPrev=0; ITG *icolPrev=NULL,*irowPrev=NULL,*jqPrev=NULL; ITG inputformatPrev=-1; ITG basePrev=1; // Current sparse matrix in CSC double *aupastix=NULL; ITG *icolpastix=NULL,*irowpastix=NULL; // Global variable that indicates whether we are currently reusing or not char redo = 1; // Global variable which data set was previously used (basic/radiation) and now #define BASIC 1 #define AS 2 char modePrev = BASIC; char mode = BASIC; char needCleanup=0; // PaStiX configuration spm_int_t iparm_basic[IPARM_SIZE]; spm_int_t iparm_as[IPARM_SIZE]; double dparm_basic[DPARM_SIZE]; double dparm_as[DPARM_SIZE]; spm_int_t *iparm = iparm_basic; double *dparm = dparm_basic; pastix_data_t* pastix_data = NULL; spmatrix_t *spm = NULL; // GPU active or not char gpu = 0; // Store how many nzs the merged Matrix has ITG nzsTotal = 0; // Size of allocated space for sparse matrix ITG pastix_nnzBound = 0; // Number of iterations that failed with mixed precision ITG mixedFailed = 0; // indicates whether this is the first invocation of PaStiX or not char firstIter = 1; // When this flag is activated, PaStiX will not reuse in the next iteration char forceRedo = 0; // Use double or mixed precision char mixed = 1; char globDoublePrecision = 0; // This is set to one, when to many iterations with mixed precision did not converge char stickToDouble = 0; // Pointers for faster matrix transpose ITG *irowacc = NULL; ITG *irowPrediction = NULL; // Number of threads ITG nthread_mkl=0; struct pastix_data_s { int totalIterations; int totalReused; ITG *icolTotal; ITG *irowTotal; ITG neqPrev; ITG nzsPrev; ITG *icolPrev; ITG *irowPrev; ITG *jqPrev; ITG inputformatPrev; ITG basePrev; double *aupastix; ITG *icolpastix; ITG *irowpastix; char redo; ITG nzsTotal; ITG pastix_nnzBound; ITG mixedFailed; char firstIter; char forceRedo; char globDoublePrecision; char stickToDouble; ITG *irowacc; ITG *irowPrediction; spm_int_t *iparm; double *dparm; char gpu; char mixed; pastix_data_t* pastix_data; spmatrix_t *spm; }; typedef struct pastix_data_s pastix_data_object; pastix_data_object pastix_mode_basic = { 0,0,NULL,NULL,0,0,NULL,NULL,NULL,-1,0, NULL,NULL,NULL,1,0,0,0,1,0,0,0,NULL,NULL,iparm_basic,dparm_basic, 0,1,NULL,NULL }; pastix_data_object 
pastix_mode_as = { 0,0,NULL,NULL,0,0,NULL,NULL,NULL,-1,0, NULL,NULL,NULL,1,0,0,0,1,0,0,0,NULL,NULL,iparm_as,dparm_as, 0,1,NULL,NULL }; // Initializes and configurates PaStiX environment. Also forwards the sparse matrix pointers void pastix_init(double *ad, double *au, double *adb, double *aub, double *sigma,ITG *icol, ITG *irow, ITG *neq, ITG *nzs, ITG *symmetryflag, ITG *inputformat, ITG *jq, ITG *nzs3){ // if reusing, only update the value pointer of the sparse matrix if(!redo){ pastixResetSteps(pastix_data); spm->values = aupastix; printf("\n"); spmPrintInfo( spm, stdout ); printf("\n"); return; } ITG nthread, nthread_v; char *env; /*set MKL_NUM_THREADS to min(CCX_NPROC_EQUATION_SOLVER,OMP_NUM_THREADS) must be done once*/ if (nthread_mkl == 0) { nthread=1; env=getenv("MKL_NUM_THREADS"); if(env) { nthread=atoi(env);} else { env=getenv("OMP_NUM_THREADS"); if(env) {nthread=atoi(env);} } env=getenv("CCX_NPROC_EQUATION_SOLVER"); if(env) { nthread_v=atoi(env); if (nthread_v <= nthread) {nthread=nthread_v;} } if (nthread < 1) {nthread=1;} nthread_mkl=nthread; } // Init integer and double parameters with default values pastixInitParam( iparm, dparm ); // Set best PaStiX parameters for CalculiX usage iparm[IPARM_ORDERING] = PastixOrderScotch; if( mode == AS ){ iparm[IPARM_SCHEDULER] = PastixSchedStatic; } else{ iparm[IPARM_SCHEDULER] = PastixSchedParsec; } iparm[IPARM_THREAD_NBR] = nthread_mkl; iparm[IPARM_GPU_NBR] = (int) gpu; iparm[IPARM_FLOAT] = globDoublePrecision ? 3 : 2; iparm[IPARM_MIN_BLOCKSIZE] = 1024; iparm[IPARM_MAX_BLOCKSIZE] = 2048; iparm[IPARM_FACTORIZATION] = PastixFactLU; iparm[IPARM_TASKS2D_WIDTH] = globDoublePrecision ? 256 : 128; iparm[IPARM_REUSE_LU] = firstIter ? 0 : 1; iparm[IPARM_REUSE_LU] = forceRedo ? 2 : 1; iparm[IPARM_GPU_MEMORY_PERCENTAGE] = 95; iparm[IPARM_GPU_MEMORY_BLOCK_SIZE] = 64 * 1024; dparm[DPARM_EPSILON_REFINEMENT] = 1e-12; iparm[IPARM_ITERMAX] = 50; iparm[IPARM_GMRES_IM] = 50; // Initialize sparse matrix spm = malloc( sizeof( spmatrix_t ) ); spmInit(spm); spm->flttype = globDoublePrecision ? SpmDouble : SpmFloat; spm->values = aupastix; spm->fmttype = SpmCSC; spm->nexp = spm->gNexp = spm->gN = spm->n = *neq; spm->mtxtype = SpmGeneral; if( *inputformat == 3 ){ spm->nnzexp = spm->gnnzexp = spm->gnnz = spm->nnz = nzsTotal + *neq; spm->flttype = SpmDouble; } else{ spm->nnzexp = spm->gnnzexp = spm->gnnz = spm->nnz = nzsTotal * 2 + *neq; } spm->colptr = (spm_int_t*) icolpastix; spm->rowptr = (spm_int_t*) irowpastix; if(*inputformat == 3){ spmatrix_t *spm_tmp=NULL; spm_tmp = spmCopy(spm); spmCheckAndCorrect( spm_tmp, spm); spm->flttype = globDoublePrecision ? SpmDouble : SpmFloat; } // initialize pastix pastixInit( &pastix_data, MPI_COMM_WORLD, iparm, dparm ); printf("\n"); spmPrintInfo( spm, stdout ); printf("\n"); // perform reordering, analysis and symbolic factorization pastix_task_analyze( pastix_data, spm ); } void pastix_csc_conversion(double *ad, double *au, double *adb, double *aub, double *sigma,ITG *icol, ITG *irow, ITG *neq, ITG *nzs, ITG *symmetryflag, ITG *inputformat, ITG *jq, ITG *nzs3){ ITG i,j; char merged=0; // jq for the merged matrix ITG* jqTotal = NULL; redo = forceRedo ? 
1 : 0; if(*neq != neqPrev || *inputformat != inputformatPrev) redo = 1; if(!redo){ nzsTotal = 0; NNEW(icolTotal, ITG, *neq); ITG base = jq[0]; // Compute the number of entries in the merged matrix #pragma omp parallel for reduction(+:nzsTotal) for(i=0;i<*neq;i++){ ITG kCur = jq[i] - base; ITG kPrev = jqPrev[i] - basePrev; ITG curColTotal = 0; while(kCur < jq[i+1] - base && kPrev < jqPrev[i+1] - basePrev) { if(irowPrev[kPrev] == irow[kCur]){ kCur++; kPrev++; } else{ if(irowPrev[kPrev] < irow[kCur]) kPrev++; else // irowPrev[kPrev] > irow[k] kCur++; } curColTotal++; } while(kCur < jq[i+1] - base){ kCur++; curColTotal++; } while(kPrev < jqPrev[i+1] - basePrev){ kPrev++; curColTotal++; } icolTotal[i] = curColTotal; nzsTotal += curColTotal; } // compute jq for the merged matrix NNEW(jqTotal, ITG, (*neq+1)); jqTotal[0] = base; for(i = 0; i < *neq; i++){ jqTotal[i+1] = jqTotal[i] + icolTotal[i]; } // If the number of entries in the merged matrix is the same as in the last iteration, we can reuse if(nzsTotal == nzsPrev){ printf("Reusing csc.!\n"); } else{ redo = 1; printf("Not reusing csc, merging patterns!\n"); } // allocate space for the sparse matrix if(*symmetryflag || *inputformat != 3) NNEW(auPtr,double,2 * nzsTotal); else NNEW(auPtr,double,nzsTotal); NNEW(adPtr,double,neqPrev); NNEW(irowTotal, ITG, nzsTotal); if(*symmetryflag || *inputformat != 3){ j=2*nzsTotal; } else{ j=nzsTotal; } #pragma omp parallel for for(i=0;i<j;i++){ auPtr[i] = 0.0; } #pragma omp parallel for for(i=0;i<*neq;i++){ adPtr[i] = 0.0; } // merge the old and the new sparsity pattern #pragma omp parallel for for(i=0;i<*neq;i++){ ITG kCur = jq[i] - base; ITG kPrev = jqPrev[i] - base; ITG kTotal = jqTotal[i] - base; adPtr[i] = ad[i] - (*sigma == 0 ? 0.0 : (*sigma)*adb[i]); while(kCur < jq[i+1] - base && kPrev < jqPrev[i+1] - base) { if(irowPrev[kPrev] == irow[kCur]){ auPtr[kTotal] = au[kCur] - (*sigma == 0 ? 0.0 : (*sigma)*aub[kCur]); if(*symmetryflag && *inputformat != 3 ) auPtr[kTotal + nzsTotal] = au[kCur + *nzs3] - (*sigma == 0 ? 0.0 : (*sigma)*aub[kCur + *nzs3]); irowTotal[kTotal] = irow[kCur]; kCur++; kPrev++; } else{ if(irowPrev[kPrev] < irow[kCur]){ // auPtr[kTotal] = 0.0; // if(*symmetryflag) // auPtr[kTotal + nzsTotal] = 0.0; irowTotal[kTotal] = irowPrev[kPrev]; kPrev++; } else // irowPrev[kPrev] > irow[k] { auPtr[kTotal] = au[kCur] - (*sigma == 0 ? 0.0 : (*sigma)*aub[kCur]); if(*symmetryflag && *inputformat != 3) auPtr[kTotal + nzsTotal] = au[kCur + *nzs3] - (*sigma == 0 ? 0.0 : (*sigma)*aub[kCur + *nzs3]); irowTotal[kTotal] = irow[kCur]; kCur++; } } kTotal++; } while(kCur < jq[i+1] - base){ auPtr[kTotal] = au[kCur] - (*sigma == 0 ? 0.0 : (*sigma)*aub[kCur]); if(*symmetryflag && *inputformat != 3) auPtr[kTotal + nzsTotal] = au[kCur + *nzs3] - (*sigma == 0 ? 
0.0 : (*sigma)*aub[kCur + *nzs3]); irowTotal[kTotal] = irow[kCur]; kCur++; kTotal++; } while(kPrev < jqPrev[i+1] - base){ // auPtr[kTotal] = 0.0; // if(*symmetryflag) // auPtr[kTotal + nzsTotal] = 0.0; irowTotal[kTotal] = irowPrev[kPrev]; kPrev++; kTotal++; } } SFREE(irowPrev); SFREE(icolPrev); SFREE(jqPrev); // update pointers to the merged matrix icol = icolTotal; icolPrev = icolTotal; irow = irowTotal; irowPrev = irowTotal; jqPrev = jqTotal; nzsPrev = nzsTotal; basePrev = base; au = auPtr; ad = adPtr; merged = 1; } else { // This is executed in either the first iteration, or when the number of equations changed printf("Not reusing csc.\n"); if(icolPrev != NULL){ SFREE(icolPrev); SFREE(irowPrev); SFREE(jqPrev); } NNEW(icolPrev,ITG,*neq); NNEW(irowPrev,ITG,*nzs); NNEW(jqPrev,ITG,*neq+1); memcpy(icolPrev, icol, sizeof(ITG) * *neq); memcpy(irowPrev, irow, sizeof(ITG) * *nzs); memcpy(jqPrev, jq, sizeof(ITG) * (*neq+1)); nzsTotal = *nzs; nzsPrev = *nzs; neqPrev = *neq; jqTotal = jqPrev; inputformatPrev = *inputformat; } // Convert Matrix Format if(*inputformat==1 || *symmetryflag==0){ /* lower triangular matrix is stored column by column in au, followed by the upper triangular matrix row by row; the diagonal terms are stored in ad */ // allocate space for the matrix and the utility arrays if(redo){ // We allocate 10% more space for the values than required so that we have to perform the expensive cudaMallocHost only once, even when the size of the matrix increases slightly if((nzsTotal * 2 + *neq) > pastix_nnzBound){ // perform the call with PaStiX because pinned memory allocation via CUDA is performed if gpu is activated pastixAllocMemory((void**)&aupastix, sizeof(double) * 1.1 * (nzsTotal * 2 + *neq), gpu); pastix_nnzBound = 1.1 * (nzsTotal * 2 + *neq); } NNEW(irowpastix,ITG,nzsTotal*2+*neq); NNEW(icolpastix,ITG,*neq+1); if(irowacc != NULL){ SFREE(irowacc); } NNEW(irowacc,ITG,*neq); if(irowPrediction != NULL){ SFREE(irowPrediction); } NNEW(irowPrediction,ITG,nzsTotal); } // Compute utility pointers for parallelization // irowPrediction stores the offset to the first entry in it's column of each entry // irowacc stores the number of elements in each row if(redo){ for(i=0;i<nzsTotal;i++){ irowPrediction[i] = irowacc[irow[i]-1]++; } icolpastix[0] = 1; for(i=0;i<*neq;i++){ icolpastix[i+1] = icolpastix[i] + icol[i] + irowacc[i] + 1; } } // copy lower triangular values to the right position in the CSC #pragma omp parallel for private(i, j) for(i=0;i<*neq;i++){ ITG k_pastix = icolpastix[i] + irowacc[i]; ITG k = jqTotal[i] - 1; aupastix[k_pastix-1] = ad[i] - (merged != 0 ? 0 : (*sigma == 0 ? 0.0 : (*sigma)*adb[i])); memcpy(aupastix + k_pastix, au + k, sizeof(double) * icol[i]); if(*sigma != 0.0 && !merged ){ for(j=0;j<icol[i];j++){ aupastix[k_pastix+j] -= (*sigma)*aub[k+j]; } } } // copy the upper triangular values to the right position in the CSC #pragma omp parallel for private(i, j) for(i=0;i<*neq;i++){ ITG k = jqTotal[i] - 1; for(j=0;j<icol[i];j++){ aupastix[irowPrediction[k] + icolpastix[irow[k]-1] - 1] = au[k+(*symmetryflag == 0 ? 0 : (*nzs == *nzs3 ? nzsTotal : *nzs3))] - (merged != 0 ? 0 : (*sigma == 0 ? 0.0 : (*sigma *aub[k+(*symmetryflag == 0 ? 0 : (*nzs == *nzs3 ? 
nzsTotal : *nzs3))]))); k++; } } // do the same for the rowptr (does not change when reusing) if(redo){ #pragma omp parallel for for(i=0;i<*neq;i++){ ITG k_pastix = icolpastix[i] + irowacc[i]; ITG k = jqTotal[i] - 1; irowpastix[k_pastix-1] = i+1; memcpy(irowpastix + k_pastix, irow + k, sizeof(ITG) * icol[i]); } #pragma omp parallel for private(i, j) for(i=0;i<*neq;i++){ ITG k = jqTotal[i] - 1; for(j=0;j<icol[i];j++){ irowpastix[irowPrediction[k] + icolpastix[irow[k]-1] - 1] = i+1; k++; } } } } else if(*inputformat==3){ if(redo){ NNEW(irowpastix,ITG,nzsTotal+*neq); NNEW(icolpastix,ITG,*neq+1); if((nzsTotal + *neq) > pastix_nnzBound){ pastixAllocMemory((void**)&aupastix, sizeof(double) * 1.1 * (nzsTotal + *neq), gpu); pastix_nnzBound = 1.1 * (nzsTotal + *neq); } memcpy(icolpastix, jqTotal, sizeof(ITG) * (*neq+1)); } ITG kflag,ilength; kflag = 2; #pragma omp parallel for private(i, j) for(i=0;i<*neq;i++){ if(redo){ memcpy(irowpastix + i + (jqTotal[i] - jqTotal[0]), irow + (jqTotal[i]-jqTotal[0]), sizeof(ITG) * (jqTotal[i+1] - jqTotal[i])); irowpastix[i+jqTotal[i+1]-1] = i+1; icolpastix[i+1] += i+1; } memcpy(aupastix + i + (jqTotal[i] - jqTotal[0]), au + (jqTotal[i]-jqTotal[0]), sizeof(double) * (jqTotal[i+1] - jqTotal[i])); if(*sigma != 0 && merged == 0){ for(j=0;j<jqTotal[i+1]-jqTotal[i];j++){ aupastix[i+(jqTotal[i]-jqTotal[0])+j] -= (*sigma)*aub[jqTotal[i]-jqTotal[0]+j]; } } aupastix[i+jq[i+1]-1] = ad[i] - (merged != 0 ? 0.0 : (*sigma == 0 ? 0.0 : (*sigma)*adb[i])); } if(redo) icolpastix[*neq] = nzsTotal + *neq + 1; } // Free the merged array in STI format if(auPtr){ SFREE(auPtr); auPtr = NULL; } if(adPtr){ SFREE(adPtr); adPtr = NULL; } } // PaStiX invocation when the factorization function is called individually void pastix_factor_main_generic(double *ad, double *au, double *adb, double *aub, double *sigma,ITG *icol, ITG *irow, ITG *neq, ITG *nzs, ITG *symmetryflag, ITG *inputformat, ITG *jq, ITG *nzs3){ pastix_set_globals(mode); gpu = 0; // Set GPU flag from environment // const char* pastix_gpu = getenv("PASTIX_GPU"); // if(pastix_gpu) // //gpu = (*pastix_gpu == '1') ? 1 : 0; // gpu = ( mode == AS ) ? 0 : (*pastix_gpu == '1') ? 1 : 0; // Perform individual invocations always in double precision. If previous iterations were in single precision, do not reuse. 
forceRedo=1; globDoublePrecision = 1; // invoke PaStiX pastix_csc_conversion(ad, au, adb, aub, sigma, icol, irow, neq, nzs, symmetryflag, inputformat, jq, nzs3); pastix_cleanup(neq,symmetryflag); pastix_init(ad, au, adb, aub, sigma, icol, irow, neq, nzs, symmetryflag, inputformat, jq, nzs3); pastix_factor(ad, au, adb, aub, sigma, icol, irow, neq, nzs, symmetryflag, inputformat, jq, nzs3); } // invokes the factorization routine of PaStiX void pastix_factor(double *ad, double *au, double *adb, double *aub, double *sigma,ITG *icol, ITG *irow, ITG *neq, ITG *nzs, ITG *symmetryflag, ITG *inputformat, ITG *jq, ITG *nzs3){ if(spm->n == 1) return; pastix_task_numfact( pastix_data, spm ); } // invokes the solve and iterative refinement routines of PaStiX ITG pastix_solve_generic(double *x, ITG *neq,ITG *symmetryflag,ITG *nrhs){ ITG i; double* b; float* buffer; // dont call pastix with only one equation, might lead to segfault if(spm->n == 1) { x[0] = x[0] / aupastix[0]; return 0; } // check whether the RHS consists of only Zeroes and return in that case char allZero = 1; for(i = 0; i < *neq; i++){ if(x[i] != 0){ allZero = 0; break; } } if(allZero){ printf("RHS only consists of 0.0\n"); return 0; } //Copy the b so that we can modify x without losing b NNEW(b,double,*nrhs**neq); memcpy(b, x, sizeof(double) * (*nrhs) * (*neq)); // If we are in mixed precision mode, cast double x to float x and call solve. Afterwards upcast the solution. if(!globDoublePrecision){ NNEW(buffer,float,*nrhs**neq); #pragma omp parallel for for(i = 0; i < (*nrhs) * (*neq); i++){ buffer[i] = (float) x[i]; } pastix_task_solve( pastix_data, *nrhs, buffer, spm->n ); #pragma omp parallel for for(i = 0; i < (*nrhs) * (*neq); i++){ x[i] = (double) buffer[i]; } SFREE(buffer); } else{ pastix_task_solve( pastix_data, *nrhs, x, spm->n ); } // invoke iterative refinement in double precision ITG rc = pastix_task_refine( pastix_data, spm->n, *nrhs, (void*)b, spm->n, (void*)x, spm->n ); // FILE *f=fopen("spm.out","a"); // fprintf(f,"\n\nMatrix\n"); // spmConvert(SpmCSR, spm); // spmPrint( spm, f); // spmConvert(SpmCSC, spm); // // fprintf(f,"Solution vector b:\n"); // for(i=0;i<*neq;i++){ // fprintf(f,"b[%d] = %.17f\n",i,b[i]); // } // // fprintf(f,"Solution vector x:\n"); // for(i=0;i<*neq;i++){ // fprintf(f,"x[%d] = %.17f\n",i,x[i]); // } // // fclose(f); SFREE(b); modePrev = mode; return rc; } ITG pastix_solve(double *x, ITG *neq,ITG *symmetryflag,ITG *nrhs){ mode = BASIC; if( modePrev != mode ) pastix_set_globals(mode); ITG rc = pastix_solve_generic(x,neq,symmetryflag,nrhs); return rc; } ITG pastix_solve_as(double *x, ITG *neq,ITG *symmetryflag,ITG *nrhs){ mode = AS; if( modePrev != mode ) pastix_set_globals(mode); ITG rc = pastix_solve_generic(x,neq,symmetryflag,nrhs); return rc; } // Invokes pastixFinalize and spmExit which frees everything but the dense LU array and parsec pointer void pastix_cleanup(ITG *neq,ITG *symmetryflag){ if( redo && !firstIter ){ spmExit( spm ); if(spm != NULL) free( spm ); pastixFinalize( &pastix_data ); } firstIter = 0; return; } void pastix_cleanup_as(ITG *neq,ITG *symmetryflag){ pastix_cleanup(neq,symmetryflag); return; } // main method for executing PaStiX void pastix_main_generic(double *ad, double *au, double *adb, double *aub, double *sigma,double *b, ITG *icol, ITG *irow, ITG *neq, ITG *nzs,ITG *symmetryflag,ITG *inputformat, ITG *jq, ITG *nzs3,ITG *nrhs){ if(*neq==0) return; pastix_set_globals( mode ); const char* pastix_gpu = getenv("PASTIX_GPU"); if(pastix_gpu) gpu = (*pastix_gpu == '1') ? 
1 : 0; // check mixed precision environment variable const char* pastix_mixed = getenv("PASTIX_MIXED_PRECISION"); if( pastix_mixed && globDoublePrecision == 0 ){ mixed = (*pastix_mixed == '1') ? 1 : 0; globDoublePrecision = (mixed == 0) ? 1 : 0; } else{ mixed = 1; } if(!stickToDouble && mixed == '1'){ if(globDoublePrecision == 1) forceRedo = 1; globDoublePrecision = 0; } if(*inputformat==3){ forceRedo=1; } // backup b in case mixed precision solve corrupts the original array double* b_backup = NULL; NNEW(b_backup, double, *nrhs * *neq); memcpy(b_backup, b, sizeof(double) * (*nrhs)*(*neq)); // benchmarking structs struct timespec start, end; struct timespec stepCscConversionStart, stepCscConversionEnd; struct timespec stepInitStart, stepInitEnd; struct timespec stepFactorizeStart, stepFactorizeEnd; struct timespec stepSolveStart, stepSolveEnd; struct timespec stepCleanUpStart, stepCleanUpEnd; double pastixTime, stepCscConversion, stepInit, stepFactorize, stepSolve, stepCleanUp, totalCCXTime, CCXwithoutPastix; clock_gettime(CLOCK_MONOTONIC, &start); clock_gettime(CLOCK_MONOTONIC, &stepCscConversionStart); // invoke csc conversion pastix_csc_conversion(ad,au,adb,aub,sigma,icol,irow, neq,nzs,symmetryflag,inputformat,jq,nzs3); clock_gettime(CLOCK_MONOTONIC, &stepCscConversionEnd); clock_gettime(CLOCK_MONOTONIC, &stepCleanUpStart); // invoke cleanup pastix_cleanup(neq,symmetryflag); clock_gettime(CLOCK_MONOTONIC, &stepCleanUpEnd); clock_gettime(CLOCK_MONOTONIC, &stepInitStart); //invoke init pastix_init(ad,au,adb,aub,sigma,icol,irow, neq,nzs,symmetryflag,inputformat,jq,nzs3); clock_gettime(CLOCK_MONOTONIC, &stepInitEnd); clock_gettime(CLOCK_MONOTONIC, &stepFactorizeStart); // invoke factor pastix_factor(ad,au,adb,aub,sigma,icol,irow, neq,nzs,symmetryflag,inputformat,jq,nzs3); clock_gettime(CLOCK_MONOTONIC, &stepFactorizeEnd); clock_gettime(CLOCK_MONOTONIC, &stepSolveStart); // if solve does not converge if(pastix_solve_generic(b,neq,symmetryflag,nrhs) == -1){ // Give up, if we tried it with double precision, use backup b otherwise if(globDoublePrecision == 1){ printf("PaStiX could not converge to a valid result\n"); exit(1); } else{ memcpy(b, b_backup, sizeof(double) * (*nrhs)*(*neq)); } // make sure that we switch to double and do not reuse in the next iteration globDoublePrecision = 1; forceRedo = 1; stickToDouble = 1; mixedFailed++; // call pastix_main recursively, but now in double precision pastix_main_generic(ad, au, adb, aub, sigma, b, icol, irow, neq, nzs, symmetryflag, inputformat, jq, nzs3, nrhs); // if we do not converge with mixed precision for the third time, permanently switch to double precision if(mixedFailed <= 2){ stickToDouble = 0; forceRedo = 1; } return; } else{ forceRedo = 0; } clock_gettime(CLOCK_MONOTONIC, &stepSolveEnd); clock_gettime(CLOCK_MONOTONIC, &end); // compute benchmark times pastixTime = (end.tv_sec - start.tv_sec) * 1e9; pastixTime = (pastixTime + (end.tv_nsec - start.tv_nsec)) * 1e-9; totalPastixTime += pastixTime; clock_gettime(CLOCK_MONOTONIC, &totalCalculixTimeEnd); totalCCXTime = (totalCalculixTimeEnd.tv_sec - totalCalculixTimeStart.tv_sec) * 1e9; totalCCXTime = (totalCCXTime + (totalCalculixTimeEnd.tv_nsec - totalCalculixTimeStart.tv_nsec)) * 1e-9; CCXwithoutPastix = totalCCXTime - totalPastixTime; stepCscConversion = (stepCscConversionEnd.tv_sec - stepCscConversionStart.tv_sec) * 1e9; stepCscConversion = (stepCscConversion + (stepCscConversionEnd.tv_nsec - stepCscConversionStart.tv_nsec)) * 1e-9; stepInit = (stepInitEnd.tv_sec - stepInitStart.tv_sec) * 
1e9; stepInit = (stepInit + (stepInitEnd.tv_nsec - stepInitStart.tv_nsec)) * 1e-9; stepFactorize = (stepFactorizeEnd.tv_sec - stepFactorizeStart.tv_sec) * 1e9; stepFactorize = (stepFactorize + (stepFactorizeEnd.tv_nsec - stepFactorizeStart.tv_nsec)) * 1e-9; stepSolve = (stepSolveEnd.tv_sec - stepSolveStart.tv_sec) * 1e9; stepSolve = (stepSolve + (stepSolveEnd.tv_nsec - stepSolveStart.tv_nsec)) * 1e-9; stepCleanUp = (stepCleanUpEnd.tv_sec - stepCleanUpStart.tv_sec) * 1e9; stepCleanUp = (stepCleanUp + (stepCleanUpEnd.tv_nsec - stepCleanUpStart.tv_nsec)) * 1e-9; // upate iteration timer totalIterations++; if(!redo) totalReused++; // benchmark output printf("________________________________________\n\n"); printf("CSC Conversion Time: %lf\n", stepCscConversion); printf("Init Time: %lf\n", stepInit); printf("Factorize Time: %lf\n", stepFactorize); printf("Solve Time: %lf\n", stepSolve); printf("Clean up Time: %lf\n", stepCleanUp); printf("---------------------------------\n"); printf("Sum: %lf\n", pastixTime); printf("\n"); printf("Total PaStiX Time: %lf\n", totalPastixTime); printf("CCX without PaStiX Time: %lf\n", CCXwithoutPastix); printf("Share of PaStiX Time: %lf\n", totalPastixTime / totalCCXTime ); printf("Total Time: %lf\n", totalCCXTime); printf("Reusability: %d : %d \n", totalReused, totalIterations); printf("________________________________________\n\n"); SFREE(b_backup); return; } void pastix_factor_main(double *ad, double *au, double *adb, double *aub, double *sigma,ITG *icol, ITG *irow, ITG *neq, ITG *nzs, ITG *symmetryflag, ITG *inputformat, ITG *jq, ITG *nzs3){ mode = BASIC; pastix_factor_main_generic(ad, au, adb, aub, sigma, icol, irow, neq, nzs, symmetryflag, inputformat, jq, nzs3); return; } void pastix_factor_main_as(double *ad, double *au, double *adb, double *aub, double *sigma,ITG *icol, ITG *irow, ITG *neq, ITG *nzs, ITG *symmetryflag, ITG *inputformat, ITG *jq, ITG *nzs3){ mode = AS; pastix_factor_main_generic(ad, au, adb, aub, sigma, icol, irow, neq, nzs, symmetryflag, inputformat, jq, nzs3); return; } void pastix_main(double *ad, double *au, double *adb, double *aub, double *sigma,double *b, ITG *icol, ITG *irow, ITG *neq, ITG *nzs,ITG *symmetryflag,ITG *inputformat, ITG *jq, ITG *nzs3,ITG *nrhs){ mode = BASIC; pastix_main_generic(ad, au, adb, aub, sigma, b, icol, irow, neq, nzs, symmetryflag, inputformat, jq, nzs3, nrhs); return; } void pastix_main_as(double *ad, double *au, double *adb, double *aub, double *sigma,double *b, ITG *icol, ITG *irow, ITG *neq, ITG *nzs,ITG *symmetryflag,ITG *inputformat, ITG *jq, ITG *nzs3,ITG *nrhs){ mode = AS; pastix_main_generic(ad, au, adb, aub, sigma, b, icol, irow, neq, nzs, symmetryflag, inputformat, jq, nzs3, nrhs); return; } void pastix_set_globals(char mode){ if( modePrev != mode ){ pastix_data_object *temp,*temp2; switch(mode){ case BASIC: temp = &pastix_mode_basic; temp2 = &pastix_mode_as; break; case AS: temp = &pastix_mode_as; temp2 = &pastix_mode_basic; break; } // saving old data set temp2->totalIterations = totalIterations; temp2->totalReused = totalReused; // Current sparse matrix in STI format temp2->icolTotal = icolTotal; temp2->irowTotal = irowTotal; // Matrix data from previous iteration temp2->neqPrev = neqPrev; temp2->nzsPrev = nzsPrev; temp2->icolPrev = icolPrev; temp2->irowPrev = irowPrev; temp2->jqPrev = jqPrev; temp2->inputformatPrev = inputformatPrev; temp2->basePrev = basePrev; // Current sparse matrix in CSC temp2->aupastix = aupastix; temp2->icolpastix = icolpastix; temp2->irowpastix = irowpastix; // Global 
variable that indicates whether we are currently reusing or not temp2->redo = redo; // PaStiX configuration temp2->iparm = iparm; temp2->dparm = dparm; temp2->pastix_data = pastix_data; temp2->spm = spm; // GPU active or not temp2->gpu = gpu; // Store how many nzs the merged Matrix has temp2->nzsTotal = nzsTotal; // Size of allocated space for sparse matrix temp2->pastix_nnzBound = pastix_nnzBound; // Number of iterations that failed with mixed precision temp2->mixedFailed = mixedFailed; // indicates whether this is the first invocation of PaStiX or not temp2->firstIter = firstIter; // When this flag is activated, PaStiX will not reuse in the next iteration temp2->forceRedo = forceRedo; // Use double or mixed precision temp2->mixed = mixed; temp2->globDoublePrecision = globDoublePrecision; // This is set to one, when to many iterations with mixed precision did not converge temp2->stickToDouble = stickToDouble; // Pointers for faster matrix transpose temp2->irowacc = irowacc; temp2->irowPrediction = irowPrediction; // Number of threads // nthread_mkl=0; // setting new data set totalIterations = temp->totalIterations; totalReused = temp->totalReused; // Current sparse matrix in STI format icolTotal = temp->icolTotal; irowTotal = temp->irowTotal; // Matrix data from previous iteration neqPrev = temp->neqPrev; nzsPrev = temp->nzsPrev; icolPrev = temp->icolPrev; irowPrev = temp->irowPrev; jqPrev = temp->jqPrev; inputformatPrev = temp->inputformatPrev; basePrev = temp->basePrev; // Current sparse matrix in CSC aupastix = temp->aupastix; icolpastix = temp->icolpastix; irowpastix = temp->irowpastix; // Global variable that indicates whether we are currently reusing or not redo = temp->redo; // PaStiX configuration iparm = temp->iparm; dparm = temp->dparm; pastix_data = temp->pastix_data; spm = temp->spm; // GPU active or not gpu = temp->gpu; // Store how many nzs the merged Matrix has nzsTotal = temp->nzsTotal; // Size of allocated space for sparse matrix pastix_nnzBound = temp->pastix_nnzBound; // Number of iterations that failed with mixed precision mixedFailed = temp->mixedFailed; // indicates whether this is the first invocation of PaStiX or not firstIter = temp->firstIter; // When this flag is activated, PaStiX will not reuse in the next iteration forceRedo = temp->forceRedo; // Use double or mixed precision mixed = temp->mixed; globDoublePrecision = temp->globDoublePrecision; // This is set to one, when to many iterations with mixed precision did not converge stickToDouble = temp->stickToDouble; // Pointers for faster matrix transpose irowacc = temp->irowacc; irowPrediction = temp->irowPrediction; // Number of threads // nthread_mkl=0; if( firstIter ){ forceRedo=1; } } modePrev = mode; return; } #endif
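/* Hedged standalone sketch (names not from the CalculiX source): the core of
 * pastix_csc_conversion above is a per-column two-pointer walk that merges
 * the previous and current sorted row-index lists, so the reused pattern is
 * the union of both sparsity patterns. Distilled to its essentials: */
#include <stddef.h>

/* Merge two ascending index lists into out (sized for the union);
 * returns the number of merged entries. */
static size_t merge_sorted_union(const int *a, size_t na,
                                 const int *b, size_t nb, int *out)
{
    size_t i = 0, j = 0, k = 0;

    while (i < na && j < nb) {
        if (a[i] == b[j]) {            /* entry present in both patterns */
            out[k++] = a[i]; i++; j++;
        } else if (a[i] < b[j]) {
            out[k++] = a[i++];         /* only in the previous pattern */
        } else {
            out[k++] = b[j++];         /* only in the current pattern */
        }
    }
    while (i < na) out[k++] = a[i++];  /* drain whichever tail remains */
    while (j < nb) out[k++] = b[j++];
    return k;
}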
krb5-18_fmt_plug.c
/* * KRB5 - Enctype 18 (aes256-cts-hmac-sha1-96) cracker patch for JtR * Created on August of 2012 by Mougey Camille (CEA/DAM) & Lalet Pierre (CEA/DAM) * * This format is one of formats saved in KDC database and used during the authentication part * * This software is Copyright (c) 2012, Mougey Camille (CEA/DAM) * Lalet Pierre (CEA/DAM) * and it is hereby released to the general public under the following terms: * Redistribution and use in source and binary forms, with or without modification, * are permitted. * * Input Format : * - user:$krb18$REALMname$hash * - user:REALMname$hash * * Format rewritten Dec, 2014, without use of -lkrb5, by JimF. Now we use 'native' JtR * pbkdf2-hmac-sha1() and simple call to 2 AES limb encrypt for entire process. Very * simple, and 10x faster, and no obsure -lkrb5 dependency */ #if AC_BUILT #include "autoconfig.h" #endif #if FMT_EXTERNS_H extern struct fmt_main fmt_krb5_18; #elif FMT_REGISTERS_H john_register_one(&fmt_krb5_18); #else #include <string.h> #include <assert.h> #include <errno.h> #include "arch.h" #include "misc.h" #include "common.h" #include "formats.h" #include "johnswap.h" #include "params.h" #include "options.h" #include "simd-intrinsics.h" #include "pbkdf2_hmac_sha1.h" #include "aes.h" #ifdef _OPENMP #include <omp.h> #ifdef SIMD_COEF_32 #ifndef OMP_SCALE #define OMP_SCALE 8 #endif #else #ifndef OMP_SCALE #define OMP_SCALE 32 #endif #endif #endif #include "memdbg.h" #define FORMAT_LABEL "krb5-18" #define FORMAT_NAME "Kerberos 5 db etype 18" #define FORMAT_TAG "$krb18$" #define TAG_LENGTH (sizeof(FORMAT_TAG)-1) #if SIMD_COEF_32 #define ALGORITHM_NAME "PBKDF2-SHA1 " SHA1_ALGORITHM_NAME " AES" #else #define ALGORITHM_NAME "PBKDF2-SHA1 32/" ARCH_BITS_STR " AES" #endif #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #define PLAINTEXT_LENGTH 64 #define CIPHERTEXT_LENGTH 64 #define BINARY_SIZE 32 #define BINARY_ALIGN 4 #define SALT_SIZE CIPHERTEXT_LENGTH #define SALT_ALIGN 1 #ifdef SIMD_COEF_32 #define MIN_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1 #define MAX_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1 #else #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #endif static struct fmt_tests kinit_tests[] = { {"OLYMPE.OLtest$214bb89cf5b8330112d52189ab05d9d05b03b5a961fe6d06203335ad5f339b26", "password"}, {FORMAT_TAG "OLYMPE.OLtest$214bb89cf5b8330112d52189ab05d9d05b03b5a961fe6d06203335ad5f339b26", "password"}, {NULL} }; static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static char saved_salt[SALT_SIZE+1]; static uint32_t (*crypt_out)[16]; static void init(struct fmt_main *self) { #ifdef _OPENMP int omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif saved_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_key)); crypt_out = mem_calloc(self->params.max_keys_per_crypt, sizeof(*crypt_out)); } static void done(void) { MEM_FREE(crypt_out); MEM_FREE(saved_key); } static int valid(char *ciphertext, struct fmt_main *self) { char *p, *q; p = ciphertext; if (!strncmp(p, FORMAT_TAG, TAG_LENGTH)) p += TAG_LENGTH; p = strstr(p, "$"); if (p == NULL) return 0; q = ciphertext; if (p - q > SALT_SIZE) /* check salt length */ return 0; q = ++p; while (atoi16l[ARCH_INDEX(*q)] != 0x7F) { q++; } return !*q && q - p == CIPHERTEXT_LENGTH; } static char *split(char *ciphertext, int index, struct fmt_main *self) { static char out[TAG_LENGTH + CIPHERTEXT_LENGTH + SALT_SIZE + 1]; if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH)) return ciphertext; memcpy(out, FORMAT_TAG, TAG_LENGTH); 
strnzcpyn(out + TAG_LENGTH, ciphertext, CIPHERTEXT_LENGTH + SALT_SIZE + 1); return out; } static void *get_salt(char *ciphertext) { static char out[SALT_SIZE+1]; char *p, *q; memset(&out, 0, sizeof(out)); p = ciphertext + TAG_LENGTH; q = strstr(p, "$"); strncpy(out, p, q-p); out[q-p] = 0; return out; } static void set_salt(void *salt) { strcpy(saved_salt, salt); } static void *get_binary(char *ciphertext) { static unsigned char *out; char *p; int i = 0; if (!out) out = mem_alloc_tiny(BINARY_SIZE, MEM_ALIGN_WORD); p = ciphertext; if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH)) p += TAG_LENGTH; p = strstr(p, "$") + 1; for (; i < BINARY_SIZE; i++) { out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])]; p += 2; } return out; } static int crypt_all(int *pcount, struct db_salt *_salt) { const int count = *pcount; int index = 0; #ifdef _OPENMP #pragma omp parallel for #endif #if defined(_OPENMP) || MAX_KEYS_PER_CRYPT > 1 for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT) #endif { unsigned char key[32], i; AES_KEY aeskey; #ifdef SSE_GROUP_SZ_SHA1 uint32_t Key[SSE_GROUP_SZ_SHA1][32/4]; int lens[SSE_GROUP_SZ_SHA1]; unsigned char *pin[SSE_GROUP_SZ_SHA1]; union { uint32_t *pout[SSE_GROUP_SZ_SHA1]; unsigned char *poutc; } x; for (i = 0; i < SSE_GROUP_SZ_SHA1; ++i) { lens[i] = strlen(saved_key[index+i]); pin[i] = (unsigned char*)saved_key[index+i]; x.pout[i] = Key[i]; } pbkdf2_sha1_sse((const unsigned char **)pin, lens, (const unsigned char*)saved_salt, strlen(saved_salt), 4096, &(x.poutc), 32, 0); #else pbkdf2_sha1((const unsigned char*)saved_key[index], strlen(saved_key[index]), (const unsigned char*)saved_salt, strlen(saved_salt), 4096, key, 32, 0); #endif i=0; #ifdef SSE_GROUP_SZ_SHA1 for (; i < SSE_GROUP_SZ_SHA1; ++i) { memcpy(key, Key[i], 32); #endif AES_set_encrypt_key(key, 256, &aeskey); AES_encrypt((unsigned char*)"kerberos{\x9b[+\x93\x13+\x93", (unsigned char*)(crypt_out[index+i]), &aeskey); AES_encrypt((unsigned char*)(crypt_out[index+i]), (unsigned char*)&crypt_out[index+i][4], &aeskey); #ifdef SSE_GROUP_SZ_SHA1 } #endif } return count; } static int cmp_all(void *binary, int count) { int index = 0; #if defined(_OPENMP) || MAX_KEYS_PER_CRYPT > 1 for (; index < count; index++) #endif if (crypt_out[index][0] == *(uint32_t*)binary) return 1; return 0; } static int cmp_one(void *binary, int index) { return !memcmp(binary, crypt_out[index], BINARY_SIZE); } static int cmp_exact(char *source, int index) { return 1; } static void set_key(char *key, int index) { int saved_len = strlen(key); if (saved_len > PLAINTEXT_LENGTH) saved_len = PLAINTEXT_LENGTH; memcpy(saved_key[index], key, saved_len); saved_key[index][saved_len] = 0; } static char *get_key(int index) { return saved_key[index]; } static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; } static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; } static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; } static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; } static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; } static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; } static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; } struct fmt_main fmt_krb5_18 = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP, { NULL 
}, { FORMAT_TAG }, kinit_tests }, { init, done, fmt_default_reset, fmt_default_prepare, valid, split, get_binary, get_salt, { NULL }, fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, fmt_default_salt_hash, NULL, set_salt, set_key, get_key, fmt_default_clear_keys, crypt_all, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact, } }; #endif /* plugin stanza */
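/* Hedged standalone sketch of the string-to-key pipeline this format attacks,
 * rewritten against OpenSSL rather than JtR's internal pbkdf2_sha1()/AES
 * wrappers (function and parameter names here are illustrative): the stored
 * 32-byte value is AES-256 over the two-block "kerberos" constant, keyed with
 * PBKDF2-HMAC-SHA1(password, REALMname, 4096 iterations), mirroring the
 * crypt_all() loop above. */
#include <string.h>
#include <openssl/evp.h>
#include <openssl/aes.h>

static void krb5_18_stored_key(const char *password,
                               const char *realm_and_user,
                               unsigned char out[32])
{
    unsigned char key[32];
    AES_KEY aeskey;
    /* Same 16-byte constant the crypt_all() loop above encrypts. */
    static const unsigned char kerberos[16] = "kerberos{\x9b[+\x93\x13+\x93";

    /* key = PBKDF2-HMAC-SHA1(password, salt = REALM||name, 4096, 32) */
    PKCS5_PBKDF2_HMAC_SHA1(password, (int)strlen(password),
                           (const unsigned char *)realm_and_user,
                           (int)strlen(realm_and_user), 4096, 32, key);

    /* Two chained AES-256 block encryptions produce the stored value. */
    AES_set_encrypt_key(key, 256, &aeskey);
    AES_encrypt(kerberos, out, &aeskey);   /* first 16 output bytes */
    AES_encrypt(out, out + 16, &aeskey);   /* second block chains on the first */
}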
Tutorial.h
//================================================================================================= /*! // \file blaze/Tutorial.h // \brief Tutorial of the Blaze library // // Copyright (C) 2013 Klaus Iglberger - All Rights Reserved // // This file is part of the Blaze library. You can redistribute it and/or modify it under // the terms of the New (Revised) BSD License. Redistribution and use in source and binary // forms, with or without modification, are permitted provided that the following conditions // are met: // // 1. Redistributions of source code must retain the above copyright notice, this list of // conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, this list // of conditions and the following disclaimer in the documentation and/or other materials // provided with the distribution. // 3. Neither the names of the Blaze development group nor the names of its contributors // may be used to endorse or promote products derived from this software without specific // prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES // OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT // SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED // TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR // BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN // ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH // DAMAGE. */ //================================================================================================= #ifndef _BLAZE_TUTORIAL_H_ #define _BLAZE_TUTORIAL_H_ //================================================================================================= // // BLAZE TUTORIAL // //================================================================================================= //**Mainpage*************************************************************************************** /*!\mainpage // // \image html blaze300x150.jpg // // This is the API for the \b Blaze high performance C++ math library. It gives a complete // overview of the individual features and sublibraries of \b Blaze. To get a first impression // on \b Blaze, the short \ref getting_started tutorial is a good place to start. Afterwards, // the following long tutorial covers the most important aspects of the \b Blaze math library. 
// The tabs at the top of the page allow a direct access to the individual modules, namespaces, // classes, and files of the \b Blaze library.\n\n // // \section table_of_content Table of Contents // // <ul> // <li> \ref configuration_and_installation </li> // <li> \ref getting_started </li> // <li> \ref vectors // <ul> // <li> \ref vector_types </li> // <li> \ref vector_operations </li> // </ul> // </li> // <li> \ref matrices // <ul> // <li> \ref matrix_types </li> // <li> \ref matrix_operations </li> // </ul> // </li> // <li> \ref adaptors // <ul> // <li> \ref adaptors_symmetric_matrices </li> // <li> \ref adaptors_hermitian_matrices </li> // <li> \ref adaptors_triangular_matrices </li> // </ul> // </li> // <li> \ref views // <ul> // <li> \ref views_subvectors </li> // <li> \ref views_submatrices </li> // <li> \ref views_rows </li> // <li> \ref views_columns </li> // </ul> // </li> // <li> \ref arithmetic_operations // <ul> // <li> \ref addition </li> // <li> \ref subtraction </li> // <li> \ref scalar_multiplication </li> // <li> \ref vector_vector_multiplication // <ul> // <li> \ref componentwise_multiplication </li> // <li> \ref inner_product </li> // <li> \ref outer_product </li> // <li> \ref cross_product </li> // </ul> // </li> // <li> \ref vector_vector_division </li> // <li> \ref matrix_vector_multiplication </li> // <li> \ref matrix_matrix_multiplication </li> // </ul> // </li> // <li> \ref custom_operations </li> // <li> \ref shared_memory_parallelization // <ul> // <li> \ref openmp_parallelization </li> // <li> \ref cpp_threads_parallelization </li> // <li> \ref boost_threads_parallelization </li> // <li> \ref serial_execution </li> // </ul> // </li> // <li> \ref serialization // <ul> // <li> \ref vector_serialization </li> // <li> \ref matrix_serialization </li> // </ul> // </li> // <li> \ref blas_functions </li> // <li> \ref lapack_functions </li> // <li> \ref configuration_files </li> // <li> \ref block_vectors_and_matrices </li> // <li> \ref custom_data_types </li> // <li> \ref error_reporting_customization </li> // <li> \ref intra_statement_optimization </li> // </ul> */ //************************************************************************************************* //**Configuration and Installation***************************************************************** /*!\page configuration_and_installation Configuration and Installation // // Since \b Blaze is a header-only library, setting up the \b Blaze library on a particular system // is a fairly easy two step process. In the following, this two step process is explained in // detail, preceded only by a short summary of the requirements. // // // \n \section requirements Requirements // <hr> // // In order for \b Blaze to work properly, the Boost library must be installed on the system. It // is recommended to use the newest Boost library available, but \b Blaze requires at minimum the // Boost version 1.54.0. If you don't have Boost installed on your system, you can download it for // free from 'http://www.boost.org'. // // Additionally, for maximum performance \b Blaze expects you to have a BLAS library installed // (<a href="http://software.intel.com/en-us/articles/intel-mkl/">Intel MKL</a>, // <a href="http://developer.amd.com/libraries/acml/">ACML</a>, // <a href="http://math-atlas.sourceforge.net">Atlas</a>, // <a href="http://www.tacc.utexas.edu/tacc-projects/gotoblas2">Goto</a>, ...). 
If you don't // have a BLAS library installed on your system, \b Blaze will still work and will not be reduced // in functionality, but performance may be limited. Thus it is strongly recommended to install a // BLAS library. // // Furthermore, for computing the determinant of a dense matrix and for the dense matrix inversion // \b Blaze requires <a href="https://en.wikipedia.org/wiki/LAPACK">LAPACK</a>. When either of // these features is used it is necessary to link the LAPACK library to the final executable. If // no LAPACK library is available the use of these features will result in a linker error. // // // \n \section step_1_installation Step 1: Installation // <hr> // // \subsection step_1_installation_unix Linux/MacOSX User // // The first step is the installation of the header files. Since \b Blaze only consists of header // files, the <tt>./blaze</tt> subdirectory can be simply copied to a standard include directory // (note that this requires root privileges): \code cp -r ./blaze /usr/local/include \endcode // Alternatively, on Unix-based machines (which includes Linux and Mac OS X) the // \c CPLUS_INCLUDE_PATH environment variable can be set. The specified directory will be // searched after any directories specified on the command line with the option \c -I and // before the standard default directories (such as \c /usr/local/include and \c /usr/include). // Assuming a user named 'Jon', the environment variable can be set as follows: \code CPLUS_INCLUDE_PATH=/usr/home/jon/blaze export CPLUS_INCLUDE_PATH \endcode // Last but not least, the <tt>./blaze</tt> subdirectory can be explicitly specified on the // command line. The following example demonstrates this by means of the GNU C++ compiler: \code g++ -I/usr/home/jon/blaze -o BlazeTest BlazeTest.cpp \endcode // \n \subsection step_1_installation_windows Windows User // // Windows doesn't have a standard include directory. Therefore the \b Blaze header files can be // copied to any other directory or simply left in the default \b Blaze directory. However, the // chosen include directory has to be explicitly specified as include path. In Visual Studio, // this is done via the project property pages, configuration properties, C/C++, General settings. // Here the additional include directories can be specified. // // // \n \section step_2_configuration Step 2: Configuration // <hr> // // The second step is the configuration and customization of the \b Blaze library. Many aspects of // \b Blaze can be adapted to specific requirements, environments and architectures by customizing // the header files in the <tt>./blaze/config/</tt> subdirectory. Since the default settings are // reasonable for most systems this step can also be skipped. However, in order to achieve maximum // performance a customization of at least the following configuration files is required: // // - <b><tt>./blaze/config/BLAS.h</tt></b>: Via this configuration file \b Blaze can be enabled // to use a third-party BLAS library for several basic linear algebra functions (such as for // instance dense matrix multiplications). In case no BLAS library is used, all linear algebra // functions use the default implementations of the \b Blaze library and therefore BLAS is not a // requirement for the compilation process. However, please note that performance may be limited. // - <b><tt>./blaze/config/CacheSize.h</tt></b>: This file contains the hardware specific cache // settings. \b Blaze uses this information to optimize its cache usage. 
For maximum performance // it is recommended to adapt these setting to a specific target architecture. // - <b><tt>./blaze/config/Thresholds.h</tt></b>: This file contains all thresholds for the // customization of the \b Blaze compute kernels. In order to tune the kernels for a specific // architecture and to maximize performance it can be necessary to adjust the thresholds, // especially for a parallel execution (see \ref shared_memory_parallelization). // // For an overview of other customization options and more details, please see the section // \ref configuration_files. // // \n Next: \ref getting_started */ //************************************************************************************************* //**Getting Started******************************************************************************** /*!\page getting_started Getting Started // // This short tutorial serves the purpose to give a quick overview of the way mathematical // expressions have to be formulated in \b Blaze. Starting with \ref vector_types, the following // long tutorial covers the most important aspects of the \b Blaze math library. // // // \n \section getting_started_vector_example A First Example // // \b Blaze is written such that using mathematical expressions is as close to mathematical // textbooks as possible and therefore as intuitive as possible. In nearly all cases the seemingly // easiest solution is the right solution and most users experience no problems when trying to // use \b Blaze in the most natural way. The following example gives a first impression of the // formulation of a vector addition in \b Blaze: \code #include <iostream> #include <blaze/Math.h> using blaze::StaticVector; using blaze::DynamicVector; // Instantiation of a static 3D column vector. The vector is directly initialized as // ( 4 -2 5 ) StaticVector<int,3UL> a{ 4, -2, 5 }; // Instantiation of a dynamic 3D column vector. Via the subscript operator the values are set to // ( 2 5 -3 ) DynamicVector<int> b( 3UL ); b[0] = 2; b[1] = 5; b[2] = -3; // Adding the vectors a and b DynamicVector<int> c = a + b; // Printing the result of the vector addition std::cout << "c =\n" << c << "\n"; \endcode // Note that the entire \b Blaze math library can be included via the \c blaze/Math.h header // file. Alternatively, the entire \b Blaze library, including both the math and the entire // utility module, can be included via the \c blaze/Blaze.h header file. Also note that all // classes and functions of \b Blaze are contained in the blaze namespace.\n\n // // Assuming that this program resides in a source file called \c FirstExample.cpp, it can be // compiled for instance via the GNU C++ compiler: \code g++ -ansi -O3 -DNDEBUG -mavx -o FirstExample FirstExample.cpp \endcode // Note the definition of the \c NDEBUG preprocessor symbol. In order to achieve maximum // performance, it is necessary to compile the program in release mode, which deactivates // all debugging functionality inside \b Blaze. It is also strongly recommended to specify // the available architecture specific instruction set (as for instance the AVX instruction // set, which if available can be activated via the \c -mavx flag). 
This allows \b Blaze // to optimize computations via vectorization.\n\n // // When running the resulting executable \c FirstExample, the output of the last line of // this small program is \code c = 6 3 2 \endcode // \n \section getting_started_matrix_example An Example Involving Matrices // // Similarly easy and intuitive are expressions involving matrices: \code #include <blaze/Math.h> using namespace blaze; // Instantiating a dynamic 3D column vector DynamicVector<int> x{ 4, -1, 3 }; // Instantiating a dynamic 2x3 row-major matrix, preinitialized with 0. Via the function call // operator three values of the matrix are explicitly set to get the matrix // ( 1 0 4 ) // ( 0 -2 0 ) DynamicMatrix<int> A( 2UL, 3UL, 0 ); A(0,0) = 1; A(0,2) = 4; A(1,1) = -2; // Performing a matrix/vector multiplication DynamicVector<int> y = A * x; // Printing the resulting vector std::cout << "y =\n" << y << "\n"; // Instantiating a static column-major matrix. The matrix is directly initialized as // ( 3 -1 ) // ( 0 2 ) // ( -1 0 ) StaticMatrix<int,3UL,2UL,columnMajor> B{ { 3, -1 }, { 0, 2 }, { -1, 0 } }; // Performing a matrix/matrix multiplication DynamicMatrix<int> C = A * B; // Printing the resulting matrix std::cout << "C =\n" << C << "\n"; \endcode // The output of this program is \code y = 16 2 C = ( -1 -1 ) ( 0 4 ) \endcode // \n \section getting_started_complex_example A Complex Example // // The following example is much more sophisticated. It shows the implementation of the Conjugate // Gradient (CG) algorithm (http://en.wikipedia.org/wiki/Conjugate_gradient) by means of the // \b Blaze library: // // \image html cg.jpg // // In this example it is not important to understand the CG algorithm itself, but to see the // advantage of the API of the \b Blaze library. In the \b Blaze implementation we will use a // sparse matrix/dense vector multiplication for a 2D Poisson equation using \f$ N \times N \f$ // unknowns. It becomes apparent that the core of the algorithm is very close to the mathematical // formulation and therefore has huge advantages in terms of readability and maintainability, // while the performance of the code is close to the expected theoretical peak performance: \code const size_t NN( N*N ); blaze::CompressedMatrix<double,rowMajor> A( NN, NN ); blaze::DynamicVector<double,columnVector> x( NN, 1.0 ), b( NN, 0.0 ), r( NN ), p( NN ), Ap( NN ); double alpha, beta, delta; // ... Initializing the sparse matrix A // Performing the CG algorithm r = b - A * x; p = r; delta = (r,r); for( size_t iteration=0UL; iteration<iterations; ++iteration ) { Ap = A * p; alpha = delta / (p,Ap); x += alpha * p; r -= alpha * Ap; beta = (r,r); if( std::sqrt( beta ) < 1E-8 ) break; p = r + ( beta / delta ) * p; delta = beta; } \endcode // \n Hopefully this short tutorial gives a good first impression of how mathematical expressions // are formulated with \b Blaze. The following long tutorial, starting with \ref vector_types, // will cover all aspects of the \b Blaze math library, i.e. it will introduce all vector and // matrix types, all possible operations on vectors and matrices, and of course all possible // mathematical expressions. 
// // \n Previous: \ref configuration_and_installation &nbsp; &nbsp; Next: \ref vectors */ //************************************************************************************************* //**Vectors**************************************************************************************** /*!\page vectors Vectors // // \tableofcontents // // // \n \section vectors_general General Concepts // <hr> // // The \b Blaze library currently offers four dense vector types (\ref vector_types_static_vector, // \ref vector_types_dynamic_vector, \ref vector_types_hybrid_vector, and \ref vector_types_custom_vector) // and one sparse vector type (\ref vector_types_compressed_vector). All vectors can be specified // as either column vectors or row vectors: \code using blaze::DynamicVector; using blaze::columnVector; using blaze::rowVector; // Setup of the 3-dimensional dense column vector // // ( 1 ) // ( 2 ) // ( 3 ) // DynamicVector<int,columnVector> a{ 1, 2, 3 }; // Setup of the 3-dimensional dense row vector // // ( 4 5 6 ) // DynamicVector<int,rowVector> b{ 4, 5, 6 }; \endcode // Per default, all vectors in \b Blaze are column vectors: \code // Instantiation of a 3-dimensional column vector blaze::DynamicVector<int> c( 3UL ); \endcode // \n \section vectors_details Vector Details // <hr> // // - \ref vector_types // - \ref vector_operations // // // \n \section vectors_examples Examples // <hr> \code using blaze::StaticVector; using blaze::DynamicVector; using blaze::CompressedVector; using blaze::rowVector; using blaze::columnVector; StaticVector<int,6UL> a; // Instantiation of a 6-dimensional static column vector CompressedVector<int,rowVector> b; // Instantiation of a compressed row vector DynamicVector<int,columnVector> c; // Instantiation of a dynamic column vector // ... Resizing and initialization c = a + trans( b ); \endcode // \n Previous: \ref getting_started &nbsp; &nbsp; Next: \ref vector_types */ //************************************************************************************************* //**Vector Types*********************************************************************************** /*!\page vector_types Vector Types // // \tableofcontents // // // \n \section vector_types_static_vector StaticVector // <hr> // // The blaze::StaticVector class template is the representation of a fixed size vector with // statically allocated elements of arbitrary type. It can be included via the header file \code #include <blaze/math/StaticVector.h> \endcode // The type of the elements, the number of elements, and the transpose flag of the vector can // be specified via the three template parameters: \code template< typename Type, size_t N, bool TF > class StaticVector; \endcode // - \c Type: specifies the type of the vector elements. StaticVector can be used with any // non-cv-qualified, non-reference, non-pointer element type. // - \c N : specifies the total number of vector elements. It is expected that StaticVector is // only used for tiny and small vectors. // - \c TF : specifies whether the vector is a row vector (\c blaze::rowVector) or a column // vector (\c blaze::columnVector). The default value is \c blaze::columnVector. 
// // The blaze::StaticVector is perfectly suited for small to medium vectors whose size is known at // compile time: \code // Definition of a 3-dimensional integral column vector blaze::StaticVector<int,3UL> a; // Definition of a 4-dimensional single precision column vector blaze::StaticVector<float,4UL,blaze::columnVector> b; // Definition of a 6-dimensional double precision row vector blaze::StaticVector<double,6UL,blaze::rowVector> c; \endcode // \n \section vector_types_dynamic_vector DynamicVector // <hr> // // The blaze::DynamicVector class template is the representation of an arbitrarily sized vector // with dynamically allocated elements of arbitrary type. It can be included via the header file \code #include <blaze/math/DynamicVector.h> \endcode // The type of the elements and the transpose flag of the vector can be specified via the two // template parameters: \code template< typename Type, bool TF > class DynamicVector; \endcode // - \c Type: specifies the type of the vector elements. DynamicVector can be used with any // non-cv-qualified, non-reference, non-pointer element type. // - \c TF : specifies whether the vector is a row vector (\c blaze::rowVector) or a column // vector (\c blaze::columnVector). The default value is \c blaze::columnVector. // // The blaze::DynamicVector is the default choice for all kinds of dense vectors and the best // choice for medium to large vectors. Its size can be modified at runtime: \code // Definition of a 3-dimensional integral column vector blaze::DynamicVector<int> a( 3UL ); // Definition of a 4-dimensional single precision column vector blaze::DynamicVector<float,blaze::columnVector> b( 4UL ); // Definition of a double precision row vector with size 0 blaze::DynamicVector<double,blaze::rowVector> c; \endcode // \n \section vector_types_hybrid_vector HybridVector // <hr> // // The blaze::HybridVector class template combines the advantages of the blaze::StaticVector and // the blaze::DynamicVector class templates. It represents a fixed size vector with statically // allocated elements, but still can be dynamically resized (within the bounds of the statically // allocated memory). It can be included via the header file \code #include <blaze/math/HybridVector.h> \endcode // The type of the elements, the number of elements, and the transpose flag of the vector can // be specified via the three template parameters: \code template< typename Type, size_t N, bool TF > class HybridVector; \endcode // - \c Type: specifies the type of the vector elements. HybridVector can be used with any // non-cv-qualified, non-reference, non-pointer element type. // - \c N : specifies the maximum number of vector elements. It is expected that HybridVector // is only used for tiny and small vectors. // - \c TF : specifies whether the vector is a row vector (\c blaze::rowVector) or a column // vector (\c blaze::columnVector). The default value is \c blaze::columnVector. 
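//
// A minimal sketch of the resizing behavior described above (assuming that a request beyond
// the static capacity is reported as a runtime error; the \c resize() function itself is
// introduced in \ref vector_operations):

\code
blaze::HybridVector<int,8UL> v( 3UL );  // Vector of size 3 with a maximum size of 8

v.resize( 6UL );  // OK: The new size is within the static capacity of 8 elements
v.resize( 9UL );  // Runtime error: The requested size exceeds the maximum size of 8
\endcode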
// // The blaze::HybridVector is a suitable choice for small to medium vectors, whose size is not // known at compile time or not fixed at runtime, but whose maximum size is known at compile // time: \code // Definition of a 3-dimensional integral column vector with a maximum size of 6 blaze::HybridVector<int,6UL> a( 3UL ); // Definition of a 4-dimensional single precision column vector with a maximum size of 16 blaze::HybridVector<float,16UL,blaze::columnVector> b( 4UL ); // Definition of a double precision row vector with size 0 and a maximum size of 6 blaze::HybridVector<double,6UL,blaze::rowVector> c; \endcode // \n \section vector_types_custom_vector CustomVector // <hr> // // The blaze::CustomVector class template provides the functionality to represent an external // array of elements of arbitrary type and a fixed size as a native \b Blaze dense vector data // structure. Thus in contrast to all other dense vector types a custom vector does not perform // any kind of memory allocation by itself, but it is provided with an existing array of elements // during construction. A custom vector can therefore be considered an alias to the existing // array. It can be included via the header file \code #include <blaze/math/CustomVector.h> \endcode // The type of the elements, the properties of the given array of elements and the transpose // flag of the vector can be specified via the following four template parameters: \code template< typename Type, bool AF, bool PF, bool TF > class CustomVector; \endcode // - Type: specifies the type of the vector elements. blaze::CustomVector can be used with // any non-cv-qualified, non-reference, non-pointer element type. // - AF : specifies whether the represented, external arrays are properly aligned with // respect to the available instruction set (SSE, AVX, ...) or not. // - PF : specifies whether the represented, external arrays are properly padded with // respect to the available instruction set (SSE, AVX, ...) or not. // - TF : specifies whether the vector is a row vector (\c blaze::rowVector) or a column // vector (\c blaze::columnVector). The default value is \c blaze::columnVector. 
// // The blaze::CustomVector is the right choice if any external array needs to be represented as // a \b Blaze dense vector data structure or if a custom memory allocation strategy needs to be // realized: \code using blaze::CustomVector; using blaze::aligned; using blaze::unaligned; using blaze::padded; using blaze::unpadded; using blaze::columnVector; using blaze::rowVector; // Definition of an unmanaged custom column vector for unaligned, unpadded integer arrays typedef CustomVector<int,unaligned,unpadded,columnVector> UnalignedUnpadded; std::vector<int> vec( 7UL ); UnalignedUnpadded a( &vec[0], 7UL ); // Definition of a managed custom column vector for unaligned but padded 'float' arrays typedef CustomVector<float,unaligned,padded,columnVector> UnalignedPadded; UnalignedPadded b( new float[16], 9UL, 16UL, blaze::ArrayDelete() ); // Definition of a managed custom row vector for aligned, unpadded 'double' arrays typedef CustomVector<double,aligned,unpadded,rowVector> AlignedUnpadded; AlignedUnpadded c( blaze::allocate<double>( 7UL ), 7UL, blaze::Deallocate() ); // Definition of a managed custom column vector for aligned, padded 'complex<double>' arrays typedef CustomVector<complex<double>,aligned,padded,columnVector> AlignedPadded; AlignedPadded d( blaze::allocate< complex<double> >( 8UL ), 5UL, 8UL, blaze::Deallocate() ); \endcode // In comparison with the remaining \b Blaze dense vector types blaze::CustomVector has several // special characteristics. All of these result from the fact that a custom vector is not // performing any kind of memory allocation, but instead is given an existing array of elements. // The following sections discuss all of these characteristics: // // -# <b>\ref vector_types_custom_vector_memory_management</b> // -# <b>\ref vector_types_custom_vector_copy_operations</b> // -# <b>\ref vector_types_custom_vector_alignment</b> // -# <b>\ref vector_types_custom_vector_padding</b> // // \n \subsection vector_types_custom_vector_memory_management Memory Management // // The blaze::CustomVector class template acts as an adaptor for an existing array of elements. As // such it provides everything that is required to use the array just like a native \b Blaze dense // vector data structure. However, this flexibility comes at the price that the user of a custom // vector is responsible for the resource management. // // When constructing a custom vector there are two choices: Either a user manually manages the // array of elements outside the custom vector, or alternatively passes the responsibility for // the memory management to an instance of CustomVector. In the second case the CustomVector // class employs shared ownership between all copies of the custom vector, which reference the // same array. // // The following examples give an impression of several possible types of custom vectors: \code using blaze::CustomVector; using blaze::ArrayDelete; using blaze::Deallocate; using blaze::allocate; using blaze::aligned; using blaze::unaligned; using blaze::padded; using blaze::unpadded; using blaze::columnVector; using blaze::rowVector; // Definition of a 3-dimensional custom vector with unaligned, unpadded and externally // managed integer array. Note that the std::vector must be guaranteed to outlive the // custom vector! std::vector<int> vec( 3UL ); CustomVector<int,unaligned,unpadded> a( &vec[0], 3UL ); // Definition of a custom row vector with size 3 for unaligned, unpadded integer arrays. 
// The responsibility for the memory management is passed to the custom vector by // providing a deleter of type 'blaze::ArrayDelete' that is used during the destruction // of the custom vector. CustomVector<int,unaligned,unpadded,rowVector> b( new int[3], 3UL, ArrayDelete() ); // Definition of a custom vector with size 3 and capacity 16 with aligned and padded // integer array. The memory management is passed to the custom vector by providing a // deleter of type 'blaze::Deallocate'. CustomVector<int,aligned,padded> c( allocate<int>( 16UL ), 3UL, 16UL, Deallocate() ); \endcode // It is possible to pass any type of deleter to the constructor. The deleter is only required // to provide a function call operator that can be passed the pointer to the managed array. As // an example the following code snippet shows the implementation of two native \b Blaze deleters // blaze::ArrayDelete and blaze::Deallocate: \code namespace blaze { struct ArrayDelete { template< typename Type > inline void operator()( Type ptr ) const { boost::checked_array_delete( ptr ); } }; struct Deallocate { template< typename Type > inline void operator()( Type ptr ) const { deallocate( ptr ); } }; } // namespace blaze \endcode // \n \subsection vector_types_custom_vector_copy_operations Copy Operations // // As with all dense vectors it is possible to copy construct a custom vector: \code using blaze::CustomVector; using blaze::unaligned; using blaze::unpadded; typedef CustomVector<int,unaligned,unpadded> CustomType; std::vector<int> vec( 5UL, 10 ); // Vector of 5 integers of the value 10 CustomType a( &vec[0], 5UL ); // Represent the std::vector as Blaze dense vector a[1] = 20; // Also modifies the std::vector CustomType b( a ); // Creating a copy of vector a b[2] = 20; // Also affects vector a and the std::vector \endcode // It is important to note that a custom vector acts as a reference to the specified array. Thus // the result of the copy constructor is a new custom vector that is referencing and representing // the same array as the original custom vector. In case a deleter has been provided to the first // custom vector, both vectors share the responsibility to destroy the array when the last vector // goes out of scope. // // In contrast to copy construction, just as with references, copy assignment does not change // which array is referenced by the custom vector, but modifies the values of the array: \code std::vector<int> vec2( 5UL, 4 ); // Vector of 5 integers of the value 4 CustomType c( &vec2[0], 5UL ); // Represent the std::vector as Blaze dense vector a = c; // Copy assignment: Sets all values of vector a and b to 4. \endcode // \n \subsection vector_types_custom_vector_alignment Alignment // // In case the custom vector is specified as \c aligned the passed array must be guaranteed to // be aligned according to the requirements of the used instruction set (SSE, AVX, ...). For // instance, if AVX is active an array of integers must be 32-byte aligned: \code using blaze::CustomVector; using blaze::Deallocate; using blaze::aligned; using blaze::unpadded; int* array = blaze::allocate<int>( 5UL ); // Needs to be 32-byte aligned CustomVector<int,aligned,unpadded> a( array, 5UL, Deallocate() ); \endcode // In case the alignment requirements are violated, a \c std::invalid_argument exception is // thrown. // // \n \subsection vector_types_custom_vector_padding Padding // // Adding padding elements to the end of an array can have a significant impact on the performance. 
// For instance, assuming that AVX is available, two aligned, padded, 3-dimensional vectors // of double precision values can be added via a single SIMD addition operation: \code using blaze::CustomVector; using blaze::Deallocate; using blaze::allocate; using blaze::aligned; using blaze::padded; typedef CustomVector<double,aligned,padded> CustomType; // Creating padded custom vectors of size 3 and a capacity of 4 CustomType a( allocate<double>( 4UL ), 3UL, 4UL, Deallocate() ); CustomType b( allocate<double>( 4UL ), 3UL, 4UL, Deallocate() ); CustomType c( allocate<double>( 4UL ), 3UL, 4UL, Deallocate() ); // ... Initialization c = a + b; // AVX-based vector addition \endcode // In this example, maximum performance is possible. However, in case no padding elements are // inserted, a scalar addition has to be used: \code using blaze::CustomVector; using blaze::Deallocate; using blaze::allocate; using blaze::aligned; using blaze::unpadded; typedef CustomVector<double,aligned,unpadded> CustomType; // Creating unpadded custom vectors of size 3 CustomType a( allocate<double>( 3UL ), 3UL, Deallocate() ); CustomType b( allocate<double>( 3UL ), 3UL, Deallocate() ); CustomType c( allocate<double>( 3UL ), 3UL, Deallocate() ); // ... Initialization c = a + b; // Scalar vector addition \endcode // Note the different number of constructor parameters for unpadded and padded custom vectors: // In contrast to unpadded vectors, where during the construction only the size of the array // has to be specified, during the construction of a padded custom vector it is additionally // necessary to explicitly specify the capacity of the array. // // The number of padding elements is required to be sufficient with respect to the available // instruction set: In case of an aligned padded custom vector the added padding elements must // guarantee that the capacity is a multiple of the SIMD vector width. In case of unaligned // padded vectors \f$ N-1 \f$ additional padding elements are required, where \f$ N \f$ is // the SIMD vector width. In case the padding is insufficient with respect to the available // instruction set, a \c std::invalid_argument exception is thrown. // // Please also note that \b Blaze will zero-initialize the padding elements in order to achieve // maximum performance! // // // \n \section vector_types_compressed_vector CompressedVector // <hr> // // The blaze::CompressedVector class is the representation of an arbitrarily sized sparse // vector, which stores only non-zero elements of arbitrary type. It can be included via the // header file \code #include <blaze/math/CompressedVector.h> \endcode // The type of the elements and the transpose flag of the vector can be specified via the two // template parameters: \code template< typename Type, bool TF > class CompressedVector; \endcode // - \c Type: specifies the type of the vector elements. CompressedVector can be used with any // non-cv-qualified, non-reference, non-pointer element type. // - \c TF : specifies whether the vector is a row vector (\c blaze::rowVector) or a column // vector (\c blaze::columnVector). The default value is \c blaze::columnVector. 
// // The blaze::CompressedVector is the right choice for all kinds of sparse vectors: \code // Definition of a 3-dimensional integral column vector blaze::CompressedVector<int> a( 3UL ); // Definition of a 4-dimensional single precision column vector with capacity for 3 non-zero elements blaze::CompressedVector<float,blaze::columnVector> b( 4UL, 3UL ); // Definition of a double precision row vector with size 0 blaze::CompressedVector<double,blaze::rowVector> c; \endcode // \n Previous: \ref vectors &nbsp; &nbsp; Next: \ref vector_operations */ //************************************************************************************************* //**Vector Operations****************************************************************************** /*!\page vector_operations Vector Operations // // \tableofcontents // // // \n \section vector_operations_constructors Constructors // <hr> // // Instantiating and setting up a vector is very easy and intuitive. However, there are a few // rules to be aware of: // - In case the last template parameter (the transpose flag) is omitted, the vector is per // default a column vector. // - The elements of a \c StaticVector or \c HybridVector are default initialized (i.e. built-in // data types are initialized to 0, class types are initialized via the default constructor). // - Newly allocated elements of a \c DynamicVector or \c CompressedVector remain uninitialized // if they are of built-in type and are default constructed if they are of class type. // // \n \subsection vector_operations_default_construction Default Construction \code using blaze::StaticVector; using blaze::DynamicVector; using blaze::CompressedVector; // All vectors can be default constructed. Whereas the size // of StaticVectors is fixed via the second template parameter, // the initial size of a default constructed DynamicVector or // CompressedVector is 0. StaticVector<int,2UL> v1; // Instantiation of a 2D integer column vector. // All elements are initialized to 0. StaticVector<long,3UL,columnVector> v2; // Instantiation of a 3D long integer column vector. // Again, all elements are initialized to 0L. DynamicVector<float> v3; // Instantiation of a dynamic single precision column // vector of size 0. DynamicVector<double,rowVector> v4; // Instantiation of a dynamic double precision row // vector of size 0. CompressedVector<int> v5; // Instantiation of a compressed integer column // vector of size 0. CompressedVector<double,rowVector> v6; // Instantiation of a compressed double precision row // vector of size 0. \endcode // \n \subsection vector_operations_size_construction Construction with Specific Size // // The \c DynamicVector, \c HybridVector and \c CompressedVector classes offer a constructor that // immediately gives the vector the required size. Whereas both dense vectors (i.e. // \c DynamicVector and \c HybridVector) use this information to allocate memory for all vector // elements, \c CompressedVector merely acquires the size but remains empty. \code DynamicVector<int,columnVector> v7( 9UL ); // Instantiation of an integer dynamic column vector // of size 9. The elements are NOT initialized! HybridVector< complex<float>, 5UL > v8( 2UL ); // Instantiation of a column vector with two single // precision complex values. The elements are // default constructed. CompressedVector<int,rowVector> v9( 10UL ); // Instantiation of a compressed row vector with // size 10. Initially, the vector provides no // capacity for non-zero elements. 
\endcode // \n \subsection vector_operations_initialization_constructors Initialization Constructors // // All dense vector classes offer a constructor that allows for a direct, homogeneous initialization // of all vector elements. In contrast, for sparse vectors the predicted number of non-zero elements // can be specified: \code StaticVector<int,3UL,rowVector> v10( 2 ); // Instantiation of a 3D integer row vector. // All elements are initialized to 2. DynamicVector<float> v11( 3UL, 7.0F ); // Instantiation of a dynamic single precision // column vector of size 3. All elements are // set to 7.0F. CompressedVector<float,rowVector> v12( 15UL, 3UL ); // Instantiation of a single precision row // vector of size 15, which provides enough // space for at least 3 non-zero elements. \endcode // \n \subsection vector_operations_array_construction Array Construction // // Alternatively, all dense vector classes offer a constructor for an initialization with a dynamic // or static array. If the vector is initialized from a dynamic array, the constructor expects the // actual size of the array as the first argument and the array as the second argument. In case of a // static array, the fixed size of the array is used: \code const std::unique_ptr<double[]> array1( new double[2] ); // ... Initialization of the dynamic array blaze::StaticVector<double,2UL> v13( 2UL, array1.get() ); int array2[4] = { 4, -5, -6, 7 }; blaze::StaticVector<int,4UL> v14( array2 ); \endcode // \n \subsection vector_operations_initializer_list_construction Initializer List Construction // // In addition, all dense vector classes can be directly initialized by means of an initializer // list: \code blaze::DynamicVector<float> v15{ 1.0F, 2.0F, 3.0F, 4.0F }; \endcode // \n \subsection vector_operations_copy_construction Copy Construction // // All dense and sparse vectors can be created as the copy of any other dense or sparse vector // with the same transpose flag (i.e. blaze::rowVector or blaze::columnVector). \code StaticVector<int,9UL,columnVector> v16( v7 ); // Instantiation of the dense column vector v16 // as copy of the dense column vector v7. DynamicVector<int,rowVector> v17( v9 ); // Instantiation of the dense row vector v17 as // copy of the sparse row vector v9. CompressedVector<int,columnVector> v18( v1 ); // Instantiation of the sparse column vector v18 // as copy of the dense column vector v1. CompressedVector<float,rowVector> v19( v12 ); // Instantiation of the sparse row vector v19 as // copy of the row vector v12. \endcode // Note that it is not possible to create a \c StaticVector as a copy of a vector with a different // size: \code StaticVector<int,5UL,columnVector> v23( v7 ); // Runtime error: Size does not match! StaticVector<int,4UL,rowVector> v24( v10 ); // Compile time error: Size does not match! \endcode // \n \section vector_operations_assignment Assignment // <hr> // // There are several types of assignment to dense and sparse vectors: // \ref vector_operations_homogeneous_assignment, \ref vector_operations_array_assignment, // \ref vector_operations_copy_assignment, and \ref vector_operations_compound_assignment. // // \n \subsection vector_operations_homogeneous_assignment Homogeneous Assignment // // Sometimes it may be necessary to assign the same value to all elements of a dense vector. 
// For this purpose, the assignment operator can be used: \code blaze::StaticVector<int,3UL> v1; blaze::DynamicVector<double> v2; // Setting all integer elements of the StaticVector to 2 v1 = 2; // Setting all double precision elements of the DynamicVector to 5.0 v2 = 5.0; \endcode // \n \subsection vector_operations_array_assignment Array Assignment // // Dense vectors can also be assigned a static array: \code blaze::StaticVector<float,2UL> v1; blaze::DynamicVector<double,rowVector> v2; float array1[2] = { 1.0F, 2.0F }; double array2[5] = { 2.1, 4.0, -1.7, 8.6, -7.2 }; v1 = array1; v2 = array2; \endcode // \n \subsection vector_operations_initializer_list_assignment Initializer List Assignment // // Alternatively, it is possible to directly assign an initializer list to a dense vector: \code blaze::StaticVector<float,2UL> v1; blaze::DynamicVector<double,rowVector> v2; v1 = { 1.0F, 2.0F }; v2 = { 2.1, 4.0, -1.7, 8.6, -7.2 }; \endcode // \n \subsection vector_operations_copy_assignment Copy Assignment // // For all vector types it is generally possible to assign another vector with the same transpose // flag (i.e. blaze::columnVector or blaze::rowVector). Note that in case of \c StaticVectors, the // assigned vector is required to have the same size as the \c StaticVector since the size of a // \c StaticVector cannot be adapted! \code blaze::StaticVector<int,3UL,columnVector> v1; blaze::DynamicVector<int,columnVector> v2( 3UL ); blaze::DynamicVector<float,columnVector> v3( 5UL ); blaze::CompressedVector<int,columnVector> v4( 3UL ); blaze::CompressedVector<float,rowVector> v5( 3UL ); // ... Initialization of the vectors v1 = v2; // OK: Assignment of a 3D dense column vector to another 3D dense column vector v1 = v4; // OK: Assignment of a 3D sparse column vector to a 3D dense column vector v1 = v3; // Runtime error: Cannot assign a 5D vector to a 3D static vector v1 = v5; // Compilation error: Cannot assign a row vector to a column vector \endcode // \n \subsection vector_operations_compound_assignment Compound Assignment // // Next to plain assignment, it is also possible to use addition assignment, subtraction // assignment, and multiplication assignment. Note, however, that in contrast to plain assignment // the size and the transpose flag of the vectors have to be equal in order to be able to perform a // compound assignment. \code blaze::StaticVector<int,5UL,columnVector> v1; blaze::DynamicVector<int,columnVector> v2( 5UL ); blaze::CompressedVector<float,columnVector> v3( 7UL ); blaze::DynamicVector<float,rowVector> v4( 7UL ); blaze::CompressedVector<float,rowVector> v5( 7UL ); // ... Initialization of the vectors v1 += v2; // OK: Addition assignment between two column vectors of the same size v1 += v3; // Runtime error: No compound assignment between vectors of different size v1 -= v4; // Compilation error: No compound assignment between vectors of different transpose flag v4 *= v5; // OK: Multiplication assignment between two row vectors of the same size \endcode // \n \section vector_operations_element_access Element Access // <hr> // // The easiest and most intuitive way to access a dense or sparse vector is via the subscript // operator. The indices to access a vector are zero-based: \code blaze::DynamicVector<int> v1( 5UL ); v1[0] = 1; v1[1] = 3; // ... 
blaze::CompressedVector<float> v2( 5UL ); v2[2] = 7.3F; v2[4] = -1.4F; \endcode // Whereas using the subscript operator on a dense vector only accesses the already existing // element, accessing an element of a sparse vector via the subscript operator potentially // inserts the element into the vector and may therefore be more expensive. Consider the // following example: \code blaze::CompressedVector<int> v1( 10UL ); for( size_t i=0UL; i<v1.size(); ++i ) { ... = v1[i]; } \endcode // Although the compressed vector is only used for read access within the for loop, using the // subscript operator temporarily inserts 10 non-zero elements into the vector. Therefore, all // vectors (sparse as well as dense) offer an alternate way via the \c begin(), \c cbegin(), // \c end(), and \c cend() functions to traverse the currently contained elements by iterators. // In case of non-const vectors, \c begin() and \c end() return an \c Iterator, which allows a // manipulation of the non-zero value; in case of a constant vector or in case \c cbegin() or // \c cend() are used, a \c ConstIterator is returned: \code using blaze::CompressedVector; CompressedVector<int> v1( 10UL ); // ... Initialization of the vector // Traversing the vector by Iterator for( CompressedVector<int>::Iterator it=v1.begin(); it!=v1.end(); ++it ) { it->value() = ...; // OK: Write access to the value of the non-zero element. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the non-zero element. } // Traversing the vector by ConstIterator for( CompressedVector<int>::ConstIterator it=v1.cbegin(); it!=v1.cend(); ++it ) { it->value() = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the non-zero element. } \endcode // Note that \c begin(), \c cbegin(), \c end(), and \c cend() are also available as free functions: \code for( CompressedVector<int>::Iterator it=begin( v1 ); it!=end( v1 ); ++it ) { // ... } for( CompressedVector<int>::ConstIterator it=cbegin( v1 ); it!=cend( v1 ); ++it ) { // ... } \endcode // \n \section vector_operations_element_insertion Element Insertion // <hr> // // In contrast to dense vectors, which store all elements independent of their value and offer // direct access to all elements, sparse vectors only store the non-zero elements contained // in the vector. Therefore it is necessary to explicitly add elements to the vector. The first // option to add elements to a sparse vector is the subscript operator: \code using blaze::CompressedVector; CompressedVector<int> v1( 3UL ); v1[1] = 2; \endcode // In case the element at the given index is not yet contained in the vector, it is automatically // inserted. Otherwise the old value is replaced by the new value 2. The operator returns a // reference to the sparse vector element.\n // An alternative is the \c set() function: In case the element is not yet contained in the vector // the element is inserted, otherwise the element's value is modified: \code // Insert or modify the value at index 3 v1.set( 3, 1 ); \endcode // However, insertion of elements can be better controlled via the \c insert() function. 
In contrast // to the subscript operator and the \c set() function it throws an exception in case the element is // already contained in the vector. In order to check for this case, the \c find() function can be // used: \code // In case the element at index 4 is not yet contained in the vector it is inserted // with a value of 6. if( v1.find( 4 ) == v1.end() ) v1.insert( 4, 6 ); \endcode // Although the \c insert() function is very flexible, for performance reasons it is not suited // for the setup of large sparse vectors. A very efficient, yet also very low-level way to fill // a sparse vector is the \c append() function. It requires the sparse vector to provide enough // capacity to insert a new element. Additionally, the index of the new element must be larger // than the index of the previous element. Violating these conditions results in undefined // behavior! \code v1.reserve( 10 ); // Reserving space for 10 non-zero elements v1.append( 5, -2 ); // Appending the element -2 at index 5 v1.append( 6, 4 ); // Appending the element 4 at index 6 // ... \endcode // \n \section vector_operations_member_functions Member Functions // <hr> // // \subsection vector_operations_size .size() // // Via the \c size() member function, the current size of a dense or sparse vector can be queried: \code // Instantiating a dynamic vector with size 10 blaze::DynamicVector<int> v1( 10UL ); v1.size(); // Returns 10 // Instantiating a compressed vector with size 12 and capacity for 3 non-zero elements blaze::CompressedVector<double> v2( 12UL, 3UL ); v2.size(); // Returns 12 \endcode // Alternatively, the free function \c size() can be used to query the current size of a vector. // In contrast to the member function, the free function can also be used to query the size of // vector expressions: \code size( v1 ); // Returns 10, i.e. has the same effect as the member function size( v2 ); // Returns 12, i.e. has the same effect as the member function blaze::DynamicMatrix<int> A( 15UL, 12UL ); size( A * v2 ); // Returns 15, i.e. the size of the resulting vector \endcode // \n \subsection vector_operations_capacity .capacity() // // Via the \c capacity() (member) function the internal capacity of a dense or sparse vector // can be queried. Note that the capacity of a vector doesn't have to be equal to the size // of a vector. In case of a dense vector the capacity will always be greater than or equal to // the size of the vector; in case of a sparse vector the capacity may even be less than // the size. \code v1.capacity(); // Returns at least 10 \endcode // For symmetry reasons, there is also a free function \c capacity() available that can be used // to query the capacity: \code capacity( v1 ); // Returns at least 10, i.e. has the same effect as the member function \endcode // Note, however, that it is not possible to query the capacity of a vector expression: \code capacity( A * v1 ); // Compilation error! \endcode // \n \subsection vector_operations_nonzeros .nonZeros() // // For both dense and sparse vectors the number of non-zero elements can be determined via the // \c nonZeros() member function. Sparse vectors directly return their number of non-zero // elements; dense vectors traverse their elements and count the number of non-zero elements. 
\code v1.nonZeros(); // Returns the number of non-zero elements in the dense vector v2.nonZeros(); // Returns the number of non-zero elements in the sparse vector \endcode // There is also a free function \c nonZeros() available to query the current number of non-zero // elements: \code nonZeros( v1 ); // Returns the number of non-zero elements in the dense vector nonZeros( v2 ); // Returns the number of non-zero elements in the sparse vector \endcode // The free \c nonZeros() function can also be used to query the number of non-zero elements in // a vector expression. However, the result is not the exact number of non-zero elements, but // may be a rough estimation: \code nonZeros( A * v1 ); // Estimates the number of non-zero elements in the vector expression \endcode // \n \subsection vector_operations_resize_reserve .resize() / .reserve() // // The size of a \c StaticVector is fixed by the second template parameter and a \c CustomVector // cannot be resized. In contrast, the size of \c DynamicVectors, \c HybridVectors as well as // \c CompressedVectors can be changed via the \c resize() function: \code using blaze::DynamicVector; using blaze::CompressedVector; DynamicVector<int,columnVector> v1; CompressedVector<int,rowVector> v2( 4 ); v2[1] = -2; v2[3] = 11; // Adapting the size of the dynamic and compressed vectors. The (optional) second parameter // specifies whether the existing elements should be preserved. Per default, the existing // elements are not preserved. v1.resize( 5UL ); // Resizing vector v1 to 5 elements. Elements of built-in type remain // uninitialized, elements of class type are default constructed. v1.resize( 3UL, false ); // Resizing vector v1 to 3 elements. The old elements are lost, the // new elements are NOT initialized! v2.resize( 8UL, true ); // Resizing vector v2 to 8 elements. The old elements are preserved. v2.resize( 5UL, false ); // Resizing vector v2 to 5 elements. The old elements are lost. \endcode // Note that resizing a vector invalidates all existing views (see e.g. \ref views_subvectors) // on the vector: \code typedef blaze::DynamicVector<int,rowVector> VectorType; typedef blaze::Subvector<VectorType> SubvectorType; VectorType v1( 10UL ); // Creating a dynamic vector of size 10 SubvectorType sv = subvector( v1, 2UL, 5UL ); // Creating a view on the range [2..6] v1.resize( 6UL ); // Resizing the vector invalidates the view \endcode // When the internal capacity of a vector is no longer sufficient, the allocation of a larger // chunk of memory is triggered. In order to avoid frequent reallocations, the \c reserve() // function can be used up front to set the internal capacity: \code blaze::DynamicVector<int> v1; v1.reserve( 100 ); v1.size(); // Returns 0 v1.capacity(); // Returns at least 100 \endcode // Note that the size of the vector remains unchanged, but only the internal capacity is set // according to the specified value! // // // \n \section vector_operations_free_functions Free Functions // <hr> // // \subsection vector_operations_reset_clear reset() / clear() // // In order to reset all elements of a vector, the \c reset() function can be used: \code // Setup of a single precision column vector, whose elements are initialized with 2.0F. blaze::DynamicVector<float> v1( 3UL, 2.0F ); // Resetting all elements to 0.0F. Only the elements are reset, the size of the vector is unchanged. 
reset( v1 ); // Resetting all elements v1.size(); // Returns 3: size and capacity remain unchanged \endcode // In order to return a vector to its default state (i.e. the state of a default constructed // vector), the \c clear() function can be used: \code // Setup of a single precision column vector, whose elements are initialized with -1.0F. blaze::DynamicVector<float> v1( 5, -1.0F ); // Resetting the entire vector. clear( v1 ); // Resetting the entire vector v1.size(); // Returns 0: size is reset, but capacity remains unchanged \endcode // Note that resetting or clearing both dense and sparse vectors does not change the capacity // of the vectors. // // // \n \subsection vector_operations_isnan isnan() // // The \c isnan() function provides the means to check a dense or sparse vector for not-a-number // elements: \code blaze::DynamicVector<double> a; // ... Resizing and initialization if( isnan( a ) ) { ... } \endcode \code blaze::CompressedVector<double> a; // ... Resizing and initialization if( isnan( a ) ) { ... } \endcode // If at least one element of the vector is not-a-number, the function returns \c true, otherwise // it returns \c false. Please note that this function only works for vectors with floating point // elements. The attempt to use it for a vector with a non-floating point element type results in // a compile time error. // // // \n \subsection vector_operations_isdefault isDefault() // // The \c isDefault() function returns whether the given dense or sparse vector is in default state: \code blaze::HybridVector<int,20UL> a; // ... Resizing and initialization if( isDefault( a ) ) { ... } \endcode // A vector is in default state if it appears to just have been default constructed. All resizable // vectors (\c HybridVector, \c DynamicVector, or \c CompressedVector) and \c CustomVector are // in default state if their size is equal to zero. A non-resizable vector (\c StaticVector, all // subvectors, rows, and columns) is in default state if all its elements are in default state. // For instance, in case the vector is instantiated for a built-in integral or floating point data // type, the function returns \c true in case all vector elements are 0 and \c false in case any // vector element is not 0. // // // \n \subsection vector_operations_isUniform isUniform() // // In order to check if all vector elements are identical, the \c isUniform function can be used: \code blaze::DynamicVector<int> a; // ... Resizing and initialization if( isUniform( a ) ) { ... } \endcode // Note that in case of sparse vectors the zero elements are also taken into account! // // // \n \subsection vector_operations_min_max min() / max() // // The \c min() and the \c max() functions return the smallest and largest element of the given // dense or sparse vector, respectively: \code blaze::StaticVector<int,4UL,rowVector> a{ -5, 2, 7, 4 }; blaze::StaticVector<int,4UL,rowVector> b{ -5, 2, -7, -4 }; min( a ); // Returns -5 min( b ); // Returns -7 max( a ); // Returns 7 max( b ); // Returns 2 \endcode // In case the vector currently has a size of 0, both functions return 0. Additionally, in case // a given sparse vector is not completely filled, the zero elements are taken into account. For // example: the following compressed vector has only 2 non-zero elements. 
However, the minimum // of this vector is 0: \code blaze::CompressedVector<int> c( 4UL, 2UL ); c[0] = 1; c[2] = 3; min( c ); // Returns 0 \endcode // Also note that the \c min() and \c max() functions can be used to compute the smallest and // largest element of a vector expression: \code min( a + b + c ); // Returns -9, i.e. the smallest value of the resulting vector max( a - b - c ); // Returns 11, i.e. the largest value of the resulting vector \endcode // \n \subsection vector_operators_abs abs() // // The \c abs() function can be used to compute the absolute values of each element of a vector. // For instance, the following computation \code blaze::StaticVector<int,3UL,rowVector> a{ -1, 2, -3 }; blaze::StaticVector<int,3UL,rowVector> b( abs( a ) ); \endcode // results in the vector \f$ b = \left(\begin{array}{*{1}{c}} 1 \\ 2 \\ 3 \\ \end{array}\right)\f$ // \n \subsection vector_operations_rounding_functions floor() / ceil() / trunc() / round() // // The \c floor(), \c ceil(), \c trunc(), and \c round() functions can be used to round down, round // up, truncate, and round each element of a vector, respectively: \code blaze::StaticVector<double,3UL,rowVector> a, b; b = floor( a ); // Rounding down each element of the vector b = ceil ( a ); // Rounding up each element of the vector b = trunc( a ); // Truncating each element of the vector b = round( a ); // Rounding each element of the vector \endcode // \n \subsection vector_operators_conj conj() // // The \c conj() function can be applied to a dense or sparse vector to compute the complex // conjugate of each element of the vector: \code using blaze::StaticVector; typedef std::complex<double> cplx; // Creating the vector // ( (-2,-1) ) // ( ( 1, 1) ) StaticVector<cplx,2UL> a{ cplx(-2.0,-1.0), cplx(1.0,1.0) }; // Computing the vector of complex conjugates // ( (-2, 1) ) // ( ( 1,-1) ) StaticVector<cplx,2UL> b; b = conj( a ); \endcode // Additionally, vectors can be conjugated in-place via the \c conjugate() function: \code blaze::DynamicVector<cplx> c( 5UL ); conjugate( c ); // In-place conjugate operation. c = conj( c ); // Same as above \endcode // \n \subsection vector_operators_real real() // // The \c real() function can be used on a dense or sparse vector to extract the real part of // each element of the vector: \code using blaze::StaticVector; typedef std::complex<double> cplx; // Creating the vector // ( (-2,-1) ) // ( ( 1, 1) ) StaticVector<cplx,2UL> a{ cplx(-2.0,-1.0), cplx(1.0,1.0) }; // Extracting the real part of each vector element // ( -2 ) // ( 1 ) StaticVector<double,2UL> b; b = real( a ); \endcode // \n \subsection vector_operators_imag imag() // // The \c imag() function can be used on a dense or sparse vector to extract the imaginary part // of each element of the vector: \code using blaze::StaticVector; typedef std::complex<double> cplx; // Creating the vector // ( (-2,-1) ) // ( ( 1, 1) ) StaticVector<cplx,2UL> a{ cplx(-2.0,-1.0), cplx(1.0,1.0) }; // Extracting the imaginary part of each vector element // ( -1 ) // ( 1 ) StaticVector<double,2UL> b; b = imag( a ); \endcode // \n \subsection vector_operations_sqrt sqrt() / invsqrt() // // Via the \c sqrt() and \c invsqrt() functions the (inverse) square root of each element of a // vector can be computed: \code blaze::DynamicVector<double> a, b, c; b = sqrt( a ); // Computes the square root of each element c = invsqrt( a ); // Computes the inverse square root of each element \endcode // Note that in case of sparse vectors only the non-zero elements are taken into account! 
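// Note that all of these element-wise functions return expression objects, which can be freely
// composed with further vector arithmetic. The following minimal sketch uses vectors and values
// chosen purely for illustration:

\code
blaze::DynamicVector<double> a{ 4.0, 9.0, 16.0 };
blaze::DynamicVector<double> b{ 1.0, -2.0, 3.0 };
blaze::DynamicVector<double> c;

// The complete right-hand side is evaluated as a single composite expression
c = sqrt( a ) + abs( b );  // Results in ( 3 5 7 )
\endcode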
// // // \n \subsection vector_operations_cbrt cbrt() / invcbrt() // // The \c cbrt() and \c invcbrt() functions can be used to compute the (inverse) cubic root // of each element of a vector: \code blaze::HybridVector<double,3UL> a, b, c; b = cbrt( a ); // Computes the cubic root of each element c = invcbrt( a ); // Computes the inverse cubic root of each element \endcode // Note that in case of sparse vectors only the non-zero elements are taken into account! // // // \n \subsection vector_operations_clip clip() // // The \c clip() function can be used to restrict all elements of a vector to a specific range: \code blaze::DynamicVector<double> a, b; b = clip( a, -1.0, 1.0 ); // Restrict all elements to the range [-1..1] \endcode // Note that in case of sparse vectors only the non-zero elements are taken into account! // // // \n \subsection vector_operations_pow pow() // // The \c pow() function can be used to raise each element of a vector to a specified exponent: \code blaze::StaticVector<double,3UL> a, b; b = pow( a, 1.2 ); // Raises each element of the vector to the exponent 1.2 \endcode // \n \subsection vector_operations_exp exp() / exp2() / exp10() // // \c exp(), \c exp2() and \c exp10() compute the base e/2/10 exponential of each element of a // vector, respectively: \code blaze::DynamicVector<double> a, b; b = exp( a ); // Computes the base e exponential of each element b = exp2( a ); // Computes the base 2 exponential of each element b = exp10( a ); // Computes the base 10 exponential of each element \endcode // Note that in case of sparse vectors only the non-zero elements are taken into account! // // // \n \subsection vector_operations_log log() / log2() / log10() // // The \c log(), \c log2() and \c log10() functions can be used to compute the natural, binary // and common logarithm of each element of a vector: \code blaze::StaticVector<double,3UL> a, b; b = log( a ); // Computes the natural logarithm of each element b = log2( a ); // Computes the binary logarithm of each element b = log10( a ); // Computes the common logarithm of each element \endcode // \n \subsection vector_operations_trigonometric_functions sin() / cos() / tan() / asin() / acos() / atan() // // The following trigonometric functions are available for both dense and sparse vectors: \code blaze::DynamicVector<double> a, b; b = sin( a ); // Computes the sine of each element of the vector b = cos( a ); // Computes the cosine of each element of the vector b = tan( a ); // Computes the tangent of each element of the vector b = asin( a ); // Computes the inverse sine of each element of the vector b = acos( a ); // Computes the inverse cosine of each element of the vector b = atan( a ); // Computes the inverse tangent of each element of the vector \endcode // Note that in case of sparse vectors only the non-zero elements are taken into account! 
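// The remark about sparse vectors can be made concrete with a minimal sketch (with purely
// illustrative values): applying one of these functions to a compressed vector evaluates and
// stores only the non-zero elements:

\code
blaze::CompressedVector<double> s( 5UL );
s[1] =  0.5;
s[3] = -0.5;

// Only the two non-zero elements are evaluated; all other elements of t remain zero
blaze::CompressedVector<double> t( sin( s ) );
\endcode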
// // // \n \subsection vector_operations_hyperbolic_functions sinh() / cosh() / tanh() / asinh() / acosh() / atanh() // // The following hyperbolic functions are available for both dense and sparse vectors: \code blaze::DynamicVector<double> a, b; b = sinh( a ); // Computes the hyperbolic sine of each element of the vector b = cosh( a ); // Computes the hyperbolic cosine of each element of the vector b = tanh( a ); // Computes the hyperbolic tangent of each element of the vector b = asinh( a ); // Computes the inverse hyperbolic sine of each element of the vector b = acosh( a ); // Computes the inverse hyperbolic cosine of each element of the vector b = atanh( a ); // Computes the inverse hyperbolic tangent of each element of the vector \endcode // Note that in case of sparse vectors only the non-zero elements are taken into account! // // // \n \subsection vector_operations_erf erf() / erfc() // // The \c erf() and \c erfc() functions compute the (complementary) error function of each // element of a vector: \code blaze::StaticVector<double,3UL,rowVector> a, b; b = erf( a ); // Computes the error function of each element b = erfc( a ); // Computes the complementary error function of each element \endcode // Note that in case of sparse vectors only the non-zero elements are taken into account! // // // \n \subsection vector_operations_foreach forEach() // // Via the \c forEach() function it is possible to execute custom operations on dense and sparse // vectors. For instance, the following example demonstrates a custom square root computation via // a lambda: \code blaze::DynamicVector<double> a, b; b = forEach( a, []( double d ) { return std::sqrt( d ); } ); \endcode // Although the computation can be parallelized, it is not vectorized and thus cannot perform at // peak performance. However, it is also possible to create vectorized custom operations. See // \ref custom_operations for a detailed overview of the possibilities of custom operations. // // // \n \subsection vector_operations_length length() / sqrLength() // // In order to calculate the length of a vector, both the \c length() and \c sqrLength() functions // can be used: \code blaze::StaticVector<float,3UL,rowVector> v{ -1.2F, 2.7F, -2.3F }; const float len = length ( v ); // Computes the current length of the vector const float sqrlen = sqrLength( v ); // Computes the square length of the vector \endcode // Note that both functions can only be used for vectors with built-in or complex element type! // // // \n \subsection vector_operations_vector_transpose trans() // // As already mentioned, vectors can either be column vectors (blaze::columnVector) or row vectors // (blaze::rowVector). A column vector cannot be assigned to a row vector and vice versa. However, // vectors can be transposed via the \c trans() function: \code blaze::DynamicVector<int,columnVector> v1( 4UL ); blaze::CompressedVector<int,rowVector> v2( 4UL ); v1 = v2; // Compilation error: Cannot assign a row vector to a column vector v1 = trans( v2 ); // OK: Transposing the row vector to a column vector and assigning it // to the column vector v1 v2 = trans( v1 ); // OK: Transposing the column vector v1 and assigning it to the row vector v2 v1 += trans( v2 ); // OK: Addition assignment of two column vectors \endcode // \n \subsection vector_operations_conjugate_transpose ctrans() // // It is also possible to compute the conjugate transpose of a vector. 
This operation is available // via the \c ctrans() function: \code blaze::CompressedVector< complex<float>, rowVector > v1( 4UL ); blaze::DynamicVector< complex<float>, columnVector > v2( 4UL ); v1 = ctrans( v2 ); // Compute the conjugate transpose vector \endcode // Note that the \c ctrans() function has the same effect as manually applying the \c conj() and // \c trans() functions in any order: \code v1 = trans( conj( v2 ) ); // Computing the conjugate transpose vector v1 = conj( trans( v2 ) ); // Computing the conjugate transpose vector \endcode // \n \subsection vector_operations_normalize normalize() // // The \c normalize() function can be used to scale any non-zero vector to a length of 1. In // case the vector does not contain a single non-zero element (i.e. is a zero vector), the // \c normalize() function returns a zero vector. \code blaze::DynamicVector<float,columnVector> v1( 10UL ); blaze::CompressedVector<double,columnVector> v2( 12UL ); v1 = normalize( v1 ); // Normalizing the dense vector v1 length( v1 ); // Returns 1 (or 0 in case of a zero vector) v1 = normalize( v2 ); // Assigning v1 the normalized vector v2 length( v1 ); // Returns 1 (or 0 in case of a zero vector) \endcode // Note that the \c normalize() function only works for floating point vectors. The attempt to // use it for an integral vector results in a compile time error. // // \n \subsection vector_operations_swap swap() // // Via the \c swap() function it is possible to completely swap the contents of two vectors of // the same type: \code blaze::DynamicVector<int,columnVector> v1( 10UL ); blaze::DynamicVector<int,columnVector> v2( 20UL ); swap( v1, v2 ); // Swapping the contents of v1 and v2 \endcode // \n Previous: \ref vector_types &nbsp; &nbsp; Next: \ref matrices */ //************************************************************************************************* //**Matrices*************************************************************************************** /*!\page matrices Matrices // // \tableofcontents // // // \n \section matrices_general General Concepts // <hr> // // The \b Blaze library currently offers four dense matrix types (\ref matrix_types_static_matrix, // \ref matrix_types_dynamic_matrix, \ref matrix_types_hybrid_matrix, and \ref matrix_types_custom_matrix) // and one sparse matrix type (\ref matrix_types_compressed_matrix). 
All matrices can either be // stored as row-major matrices or column-major matrices: \code using blaze::DynamicMatrix; using blaze::rowMajor; using blaze::columnMajor; // Setup of the 2x3 row-major dense matrix // // ( 1 2 3 ) // ( 4 5 6 ) // DynamicMatrix<int,rowMajor> A{ { 1, 2, 3 }, { 4, 5, 6 } }; // Setup of the 3x2 column-major dense matrix // // ( 1 4 ) // ( 2 5 ) // ( 3 6 ) // DynamicMatrix<int,columnMajor> B{ { 1, 4 }, { 2, 5 }, { 3, 6 } }; \endcode // Per default, all matrices in \b Blaze are row-major matrices: \code // Instantiation of a 3x3 row-major matrix blaze::DynamicMatrix<int> C( 3UL, 3UL ); \endcode // \n \section matrices_details Matrix Details // <hr> // // - \ref matrix_types // - \ref matrix_operations // // // \n \section matrices_examples Examples // <hr> \code using blaze::StaticMatrix; using blaze::DynamicMatrix; using blaze::CompressedMatrix; using blaze::rowMajor; using blaze::columnMajor; StaticMatrix<double,6UL,20UL> A; // Instantiation of a 6x20 row-major static matrix CompressedMatrix<double,rowMajor> B; // Instantiation of a row-major compressed matrix DynamicMatrix<double,columnMajor> C; // Instantiation of a column-major dynamic matrix // ... Resizing and initialization C = A * B; \endcode // \n Previous: \ref vector_operations &nbsp; &nbsp; Next: \ref matrix_types */ //************************************************************************************************* //**Matrix Types*********************************************************************************** /*!\page matrix_types Matrix Types // // \tableofcontents // // // \n \section matrix_types_static_matrix StaticMatrix // <hr> // // The blaze::StaticMatrix class template is the representation of a fixed size matrix with // statically allocated elements of arbitrary type. It can be included via the header file \code #include <blaze/math/StaticMatrix.h> \endcode // The type of the elements, the number of rows and columns, and the storage order of the matrix // can be specified via the four template parameters: \code template< typename Type, size_t M, size_t N, bool SO > class StaticMatrix; \endcode // - \c Type: specifies the type of the matrix elements. StaticMatrix can be used with any // non-cv-qualified, non-reference element type. // - \c M : specifies the total number of rows of the matrix. // - \c N : specifies the total number of columns of the matrix. Note that it is expected // that StaticMatrix is only used for tiny and small matrices. // - \c SO : specifies the storage order (blaze::rowMajor, blaze::columnMajor) of the matrix. // The default value is blaze::rowMajor. // // The blaze::StaticMatrix is perfectly suited for small to medium matrices whose dimensions are // known at compile time: \code // Definition of a 3x4 integral row-major matrix blaze::StaticMatrix<int,3UL,4UL> A; // Definition of a 4x6 single precision row-major matrix blaze::StaticMatrix<float,4UL,6UL,blaze::rowMajor> B; // Definition of a 6x4 double precision column-major matrix blaze::StaticMatrix<double,6UL,4UL,blaze::columnMajor> C; \endcode // \n \section matrix_types_dynamic_matrix DynamicMatrix // <hr> // // The blaze::DynamicMatrix class template is the representation of an arbitrarily sized matrix // with \f$ M \cdot N \f$ dynamically allocated elements of arbitrary type. 
It can be included // via the header file \code #include <blaze/math/DynamicMatrix.h> \endcode // The type of the elements and the storage order of the matrix can be specified via the two // template parameters: \code template< typename Type, bool SO > class DynamicMatrix; \endcode // - \c Type: specifies the type of the matrix elements. DynamicMatrix can be used with any // non-cv-qualified, non-reference element type. // - \c SO : specifies the storage order (blaze::rowMajor, blaze::columnMajor) of the matrix. // The default value is blaze::rowMajor. // // The blaze::DynamicMatrix is the default choice for all kinds of dense matrices and the best // choice for medium to large matrices. The number of rows and columns can be modified at runtime: \code // Definition of a 3x4 integral row-major matrix blaze::DynamicMatrix<int> A( 3UL, 4UL ); // Definition of a 4x6 single precision row-major matrix blaze::DynamicMatrix<float,blaze::rowMajor> B( 4UL, 6UL ); // Definition of a double precision column-major matrix with 0 rows and columns blaze::DynamicMatrix<double,blaze::columnMajor> C; \endcode // \n \section matrix_types_hybrid_matrix HybridMatrix // <hr> // // The HybridMatrix class template combines the flexibility of a dynamically sized matrix with // the efficiency and performance of a fixed size matrix. It is implemented as a cross between // the blaze::StaticMatrix and the blaze::DynamicMatrix class templates: Similar to the static // matrix it uses static stack memory instead of dynamically allocated memory and similar to the // dynamic matrix it can be resized (within the extent of the static memory). It can be included // via the header file \code #include <blaze/math/HybridMatrix.h> \endcode // The type of the elements, the maximum number of rows and columns and the storage order of the // matrix can be specified via the four template parameters: \code template< typename Type, size_t M, size_t N, bool SO > class HybridMatrix; \endcode // - \c Type: specifies the type of the matrix elements. HybridMatrix can be used with any // non-cv-qualified, non-reference, non-pointer element type. // - \c M : specifies the maximum number of rows of the matrix. // - \c N : specifies the maximum number of columns of the matrix. Note that it is expected // that HybridMatrix is only used for tiny and small matrices. // - \c SO : specifies the storage order (blaze::rowMajor, blaze::columnMajor) of the matrix. // The default value is blaze::rowMajor. // // The blaze::HybridMatrix is a suitable choice for small to medium matrices, whose dimensions // are not known at compile time or not fixed at runtime, but whose maximum dimensions are known // at compile time: \code // Definition of a 3x4 integral row-major matrix with maximum dimensions of 6x8 blaze::HybridMatrix<int,6UL,8UL> A( 3UL, 4UL ); // Definition of a 4x6 single precision row-major matrix with maximum dimensions of 12x16 blaze::HybridMatrix<float,12UL,16UL,blaze::rowMajor> B( 4UL, 6UL ); // Definition of a 0x0 double precision column-major matrix with maximum dimensions of 6x6 blaze::HybridMatrix<double,6UL,6UL,blaze::columnMajor> C; \endcode // \n \section matrix_types_custom_matrix CustomMatrix // <hr> // // The blaze::CustomMatrix class template provides the functionality to represent an external // array of elements of arbitrary type and a fixed size as a native \b Blaze dense matrix data // structure.
Thus in contrast to all other dense matrix types a custom matrix does not perform // any kind of memory allocation by itself, but it is provided with an existing array of elements // during construction. A custom matrix can therefore be considered an alias to the existing // array. It can be included via the header file \code #include <blaze/math/CustomMatrix.h> \endcode // The type of the elements, the properties of the given array of elements and the storage order // of the matrix can be specified via the following four template parameters: \code template< typename Type, bool AF, bool PF, bool SO > class CustomMatrix; \endcode // - \c Type: specifies the type of the matrix elements. blaze::CustomMatrix can be used with // any non-cv-qualified, non-reference, non-pointer element type. // - \c AF : specifies whether the represented, external arrays are properly aligned with // respect to the available instruction set (SSE, AVX, ...) or not. // - \c PF : specifies whether the represented, external arrays are properly padded with // respect to the available instruction set (SSE, AVX, ...) or not. // - \c SO : specifies the storage order (blaze::rowMajor, blaze::columnMajor) of the matrix. // The default value is blaze::rowMajor. // // The blaze::CustomMatrix is the right choice if any external array needs to be represented as // a \b Blaze dense matrix data structure or if a custom memory allocation strategy needs to be // realized: \code using blaze::CustomMatrix; using blaze::aligned; using blaze::unaligned; using blaze::padded; using blaze::unpadded; using blaze::rowMajor; using blaze::columnMajor; // Definition of an unmanaged 3x4 custom matrix for unaligned, unpadded integer arrays typedef CustomMatrix<int,unaligned,unpadded,rowMajor> UnalignedUnpadded; std::vector<int> vec( 12UL ); UnalignedUnpadded A( &vec[0], 3UL, 4UL ); // Definition of a managed 5x6 custom matrix for unaligned but padded 'float' arrays typedef CustomMatrix<float,unaligned,padded,columnMajor> UnalignedPadded; UnalignedPadded B( new float[48], 5UL, 6UL, 8UL, blaze::ArrayDelete() ); // Definition of a managed 12x13 custom matrix for aligned, unpadded 'double' arrays typedef CustomMatrix<double,aligned,unpadded,rowMajor> AlignedUnpadded; AlignedUnpadded C( blaze::allocate<double>( 192UL ), 12UL, 13UL, 16UL, blaze::Deallocate() ); // Definition of a 7x14 custom matrix for aligned, padded 'complex<double>' arrays typedef CustomMatrix<complex<double>,aligned,padded,columnMajor> AlignedPadded; AlignedPadded D( blaze::allocate< complex<double> >( 112UL ), 7UL, 14UL, 8UL, blaze::Deallocate() ); \endcode // In comparison with the remaining \b Blaze dense matrix types blaze::CustomMatrix has several // special characteristics. All of these result from the fact that a custom matrix is not // performing any kind of memory allocation, but instead is given an existing array of elements. // The following sections discuss all of these characteristics: // // -# <b>\ref matrix_types_custom_matrix_memory_management</b> // -# <b>\ref matrix_types_custom_matrix_copy_operations</b> // -# <b>\ref matrix_types_custom_matrix_alignment</b> // -# <b>\ref matrix_types_custom_matrix_padding</b> // // \n \subsection matrix_types_custom_matrix_memory_management Memory Management // // The blaze::CustomMatrix class template acts as an adaptor for an existing array of elements. As // such it provides everything that is required to use the array just like a native \b Blaze dense // matrix data structure.
However, this flexibility comes at the price that the user of a custom // matrix is responsible for the resource management. // // When constructing a custom matrix there are two choices: Either a user manually manages the // array of elements outside the custom matrix, or alternatively passes the responsibility for // the memory management to an instance of CustomMatrix. In the second case the CustomMatrix // class employs shared ownership between all copies of the custom matrix, which reference the // same array. // // The following examples give an impression of several possible types of custom matrices: \code using blaze::CustomMatrix; using blaze::ArrayDelete; using blaze::Deallocate; using blaze::allocate; using blaze::aligned; using blaze::unaligned; using blaze::padded; using blaze::unpadded; using blaze::rowMajor; using blaze::columnMajor; // Definition of a 3x4 custom row-major matrix with unaligned, unpadded and externally // managed integer array. Note that the std::vector must be guaranteed to outlive the // custom matrix! std::vector<int> vec( 12UL ); CustomMatrix<int,unaligned,unpadded> A( &vec[0], 3UL, 4UL ); // Definition of a 3x4 custom row-major matrix for unaligned, unpadded integer arrays. // The responsibility for the memory management is passed to the custom matrix by // providing a deleter of type 'blaze::ArrayDelete' that is used during the destruction // of the custom matrix. CustomMatrix<int,unaligned,unpadded,rowMajor> B( new int[12], 3UL, 4UL, ArrayDelete() ); // Definition of a custom 8x12 matrix for an aligned and padded integer array of // capacity 128 (including 4 padding elements per row). The memory management is passed // to the custom matrix by providing a deleter of type 'blaze::Deallocate'. CustomMatrix<int,aligned,padded> C( allocate<int>( 128UL ), 8UL, 12UL, 16UL, Deallocate() ); \endcode // It is possible to pass any type of deleter to the constructor. The deleter is only required // to provide a function call operator that can be passed the pointer to the managed array. As // an example the following code snippet shows the implementation of two native \b Blaze deleters // blaze::ArrayDelete and blaze::Deallocate: \code namespace blaze { struct ArrayDelete { template< typename Type > inline void operator()( Type ptr ) const { boost::checked_array_delete( ptr ); } }; struct Deallocate { template< typename Type > inline void operator()( Type ptr ) const { deallocate( ptr ); } }; } // namespace blaze \endcode // \n \subsection matrix_types_custom_matrix_copy_operations Copy Operations // // As with all dense matrices it is possible to copy construct a custom matrix: \code using blaze::CustomMatrix; using blaze::unaligned; using blaze::unpadded; typedef CustomMatrix<int,unaligned,unpadded> CustomType; std::vector<int> vec( 6UL, 10 ); // Vector of 6 integers of the value 10 CustomType A( &vec[0], 2UL, 3UL ); // Represent the std::vector as Blaze dense matrix A(0,1) = 20; // Also modifies the std::vector CustomType B( A ); // Creating a copy of matrix A B(0,2) = 20; // Also affects matrix A and the std::vector \endcode // It is important to note that a custom matrix acts as a reference to the specified array. Thus // the result of the copy constructor is a new custom matrix that is referencing and representing // the same array as the original custom matrix. In case a deleter has been provided to the first // custom matrix, both matrices share the responsibility to destroy the array when the last matrix // goes out of scope.
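// This shared ownership can be sketched as follows (a minimal illustration based on the example
// types from above; the extra scopes merely make the lifetimes explicit):

   \code
   using blaze::CustomMatrix;
   using blaze::ArrayDelete;
   using blaze::unaligned;
   using blaze::unpadded;

   typedef CustomMatrix<int,unaligned,unpadded>  CustomType;

   {
      CustomType A( new int[12], 3UL, 4UL, ArrayDelete() );  // A manages the array
      {
         CustomType B( A );  // B references the same array; ownership is shared
      }                      // B goes out of scope; the array is NOT destroyed yet
   }                         // A goes out of scope; ArrayDelete destroys the array exactly once
   \endcode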
// // In contrast to copy construction, just as with references, copy assignment does not change // which array is referenced by the custom matrices, but modifies the values of the array: \code std::vector<int> vec2( 6UL, 4 ); // Vector of 6 integers of the value 4 CustomType C( &vec2[0], 2UL, 3UL ); // Represent the std::vector as Blaze dense matrix A = C; // Copy assignment: Set all values of matrix A and B to 4. \endcode // \n \subsection matrix_types_custom_matrix_alignment Alignment // // In case the custom matrix is specified as \c aligned the passed array must adhere to some // alignment restrictions based on the alignment requirements of the used data type and the // used instruction set (SSE, AVX, ...). The restriction applies to the first element of each // row/column: In case of a row-major matrix the first element of each row must be properly // aligned, in case of a column-major matrix the first element of each column must be properly // aligned. For instance, if a row-major matrix is used and AVX is active the first element of // each row must be 32-byte aligned: \code using blaze::CustomMatrix; using blaze::Deallocate; using blaze::aligned; using blaze::padded; using blaze::rowMajor; int* array = blaze::allocate<int>( 40UL ); // Is guaranteed to be 32-byte aligned CustomMatrix<int,aligned,padded,rowMajor> A( array, 5UL, 6UL, 8UL, Deallocate() ); \endcode // In the example, the row-major matrix has six columns. However, since with AVX eight integer // values are loaded together the matrix is padded with two additional elements. This guarantees // that the first element of each row is 32-byte aligned. In case the alignment requirements are // violated, a \c std::invalid_argument exception is thrown. // // \n \subsection matrix_types_custom_matrix_padding Padding // // Adding padding elements to the end of each row/column can have a significant impact on the // performance. For instance, assuming that AVX is available, then two aligned, padded, 3x3 double // precision matrices can be added via three SIMD addition operations: \code using blaze::CustomMatrix; using blaze::Deallocate; using blaze::allocate; using blaze::aligned; using blaze::padded; typedef CustomMatrix<double,aligned,padded> CustomType; // Creating padded custom 3x3 matrix with an additional padding element in each row CustomType A( allocate<double>( 12UL ), 3UL, 3UL, 4UL, Deallocate() ); CustomType B( allocate<double>( 12UL ), 3UL, 3UL, 4UL, Deallocate() ); CustomType C( allocate<double>( 12UL ), 3UL, 3UL, 4UL, Deallocate() ); // ... Initialization C = A + B; // AVX-based matrix addition \endcode // In this example, maximum performance is possible. However, in case no padding elements are // inserted a scalar addition has to be used: \code using blaze::CustomMatrix; using blaze::Deallocate; using blaze::allocate; using blaze::aligned; using blaze::unpadded; typedef CustomMatrix<double,aligned,unpadded> CustomType; // Creating unpadded custom 3x3 matrix CustomType A( allocate<double>( 12UL ), 3UL, 3UL, 4UL, Deallocate() ); CustomType B( allocate<double>( 12UL ), 3UL, 3UL, 4UL, Deallocate() ); CustomType C( allocate<double>( 12UL ), 3UL, 3UL, 4UL, Deallocate() ); // ... Initialization C = A + B; // Scalar matrix addition \endcode // Note that the construction of padded and unpadded aligned matrices looks identical. However, // in case of padded matrices, \b Blaze will zero initialize the padding elements and use them // in all computations in order to achieve maximum performance.
In case of an unpadded matrix // \b Blaze will ignore those elements, with the downside that it is not possible to load a complete // row to an AVX register, which makes it necessary to fall back to a scalar addition. // // The number of padding elements is required to be sufficient with respect to the available // instruction set: In case of an aligned padded custom matrix the added padding elements must // guarantee that the total number of elements in each row/column is a multiple of the SIMD // vector width. In case of an unaligned padded matrix the number of padding elements can be // greater than or equal to the number of padding elements of an aligned padded custom matrix. In case // the padding is insufficient with respect to the available instruction set, a // \c std::invalid_argument exception is thrown. // // // \n \section matrix_types_compressed_matrix CompressedMatrix // <hr> // // The blaze::CompressedMatrix class template is the representation of an arbitrary sized sparse // matrix with \f$ M \cdot N \f$ dynamically allocated elements of arbitrary type. It can be // included via the header file \code #include <blaze/math/CompressedMatrix.h> \endcode // The type of the elements and the storage order of the matrix can be specified via the two // template parameters: \code template< typename Type, bool SO > class CompressedMatrix; \endcode // - \c Type: specifies the type of the matrix elements. CompressedMatrix can be used with // any non-cv-qualified, non-reference, non-pointer element type. // - \c SO : specifies the storage order (blaze::rowMajor, blaze::columnMajor) of the matrix. // The default value is blaze::rowMajor. // // The blaze::CompressedMatrix is the right choice for all kinds of sparse matrices: \code // Definition of a 3x4 integral row-major matrix blaze::CompressedMatrix<int> A( 3UL, 4UL ); // Definition of a 4x6 single precision row-major matrix blaze::CompressedMatrix<float,blaze::rowMajor> B( 4UL, 6UL ); // Definition of a double precision column-major matrix with 0 rows and columns blaze::CompressedMatrix<double,blaze::columnMajor> C; \endcode // \n Previous: \ref matrices &nbsp; &nbsp; Next: \ref matrix_operations */ //************************************************************************************************* //**Matrix Operations****************************************************************************** /*!\page matrix_operations Matrix Operations // // \tableofcontents // // // \n \section matrix_operations_constructors Constructors // <hr> // // Matrices are just as easy and intuitive to create as vectors. Still, there are a few rules // to be aware of: // - In case the last template parameter (the storage order) is omitted, the matrix is per // default stored in row-major order. // - The elements of a \c StaticMatrix or \c HybridMatrix are default initialized (i.e. built-in // data types are initialized to 0, class types are initialized via the default constructor). // - Newly allocated elements of a \c DynamicMatrix or \c CompressedMatrix remain uninitialized // if they are of built-in type and are default constructed if they are of class type. // // \n \subsection matrix_operations_default_construction Default Construction \code using blaze::StaticMatrix; using blaze::DynamicMatrix; using blaze::CompressedMatrix; // All matrices can be default constructed. Whereas the size of // a StaticMatrix is fixed via the second and third template // parameter, the initial size of a constructed DynamicMatrix // or CompressedMatrix is 0.
StaticMatrix<int,2UL,2UL> M1; // Instantiation of a 2x2 integer row-major // matrix. All elements are initialized to 0. DynamicMatrix<float> M2; // Instantiation of a single precision dynamic // row-major matrix with 0 rows and 0 columns. DynamicMatrix<double,columnMajor> M3; // Instantiation of a double precision dynamic // column-major matrix with 0 rows and 0 columns. CompressedMatrix<int> M4; // Instantiation of a compressed integer // row-major matrix of size 0x0. CompressedMatrix<double,columnMajor> M5; // Instantiation of a compressed double precision // column-major matrix of size 0x0. \endcode // \n \subsection matrix_operations_size_construction Construction with Specific Size // // The \c DynamicMatrix, \c HybridMatrix, and \c CompressedMatrix classes offer a constructor // that allows the number of rows and columns to be specified immediately: \code DynamicMatrix<int> M6( 5UL, 4UL ); // Instantiation of a 5x4 dynamic row-major // matrix. The elements are not initialized. HybridMatrix<double,5UL,9UL> M7( 3UL, 7UL ); // Instantiation of a 3x7 hybrid row-major // matrix. The elements are not initialized. CompressedMatrix<float,columnMajor> M8( 8UL, 6UL ); // Instantiation of an empty 8x6 compressed // column-major matrix. \endcode // Note that dense matrices (in this case \c DynamicMatrix and \c HybridMatrix) immediately // allocate enough capacity for all matrix elements. Sparse matrices on the other hand (in this // example \c CompressedMatrix) merely acquire the size, but don't necessarily allocate memory. // // // \n \subsection matrix_operations_initialization_constructors Initialization Constructors // // All dense matrix classes offer a constructor for a direct, homogeneous initialization of all // matrix elements. In contrast, for sparse matrices the predicted number of non-zero elements // can be specified. \code StaticMatrix<int,4UL,3UL,columnMajor> M9( 7 ); // Instantiation of a 4x3 integer column-major // matrix. All elements are initialized to 7. DynamicMatrix<float> M10( 2UL, 5UL, 2.0F ); // Instantiation of a 2x5 single precision row-major // matrix. All elements are initialized to 2.0F. CompressedMatrix<int> M11( 3UL, 4UL, 4 ); // Instantiation of a 3x4 integer row-major // matrix with capacity for 4 non-zero elements. \endcode // \n \subsection matrix_operations_array_construction Array Construction // // Alternatively, all dense matrix classes offer a constructor for an initialization with a // dynamic or static array. If the matrix is initialized from a dynamic array, the constructor // expects the dimensions of the values provided by the array as the first and second argument, // and the array itself as the third argument. In case of a static array, the fixed size of the array is used: \code const std::unique_ptr<double[]> array1( new double[6] ); // ... Initialization of the dynamic array blaze::StaticMatrix<double,2UL,3UL> M12( 2UL, 3UL, array1.get() ); int array2[2][2] = { { 4, -5 }, { -6, 7 } }; blaze::StaticMatrix<int,2UL,2UL,rowMajor> M13( array2 ); \endcode // \n \subsection matrix_operations_initializer_list_construction Initializer List Construction // // In addition, all dense matrix classes can be directly initialized by means of an initializer // list: \code blaze::DynamicMatrix<float,columnMajor> M14{ { 3.1F, 6.4F }, { -0.9F, -1.2F }, { 4.8F, 0.6F } }; \endcode // \n \subsection matrix_operations_copy_construction Copy Construction // // All dense and sparse matrices can be created as a copy of another dense or sparse matrix.
\code StaticMatrix<int,5UL,4UL,rowMajor> M15( M6 ); // Instantiation of the dense row-major matrix M15 // as copy of the dense row-major matrix M6. DynamicMatrix<float,columnMajor> M16( M8 ); // Instantiation of the dense column-major matrix M16 // as copy of the sparse column-major matrix M8. CompressedMatrix<double,columnMajor> M17( M7 ); // Instantiation of the compressed column-major matrix // M17 as copy of the dense row-major matrix M7. CompressedMatrix<float,rowMajor> M18( M8 ); // Instantiation of the compressed row-major matrix // M18 as copy of the compressed column-major matrix M8. \endcode // Note that it is not possible to create a \c StaticMatrix as a copy of a matrix with a different // number of rows and/or columns: \code StaticMatrix<int,4UL,5UL,rowMajor> M19( M6 ); // Runtime error: Number of rows and columns // does not match! StaticMatrix<int,4UL,4UL,columnMajor> M20( M9 ); // Compile time error: Number of columns does // not match! \endcode // \n \section matrix_operations_assignment Assignment // <hr> // // There are several types of assignment to dense and sparse matrices: // \ref matrix_operations_homogeneous_assignment, \ref matrix_operations_array_assignment, // \ref matrix_operations_copy_assignment, and \ref matrix_operations_compound_assignment. // // // \n \subsection matrix_operations_homogeneous_assignment Homogeneous Assignment // // It is possible to assign the same value to all elements of a dense matrix. All dense matrix // classes provide an according assignment operator: \code blaze::StaticMatrix<int,3UL,2UL> M1; blaze::DynamicMatrix<double> M2; // Setting all integer elements of the StaticMatrix to 4 M1 = 4; // Setting all double precision elements of the DynamicMatrix to 3.5 M2 = 3.5; \endcode // \n \subsection matrix_operations_array_assignment Array Assignment // // Dense matrices can also be assigned a static array: \code blaze::StaticMatrix<int,2UL,2UL,rowMajor> M1; blaze::StaticMatrix<int,2UL,2UL,columnMajor> M2; blaze::DynamicMatrix<double> M3; int array1[2][2] = { { 1, 2 }, { 3, 4 } }; double array2[3][2] = { { 3.1, 6.4 }, { -0.9, -1.2 }, { 4.8, 0.6 } }; M1 = array1; M2 = array1; M3 = array2; \endcode // Note that the dimensions of the static array have to match the size of a \c StaticMatrix, // whereas a \c DynamicMatrix is resized according to the array dimensions: \f$ M3 = \left(\begin{array}{*{2}{c}} 3.1 & 6.4 \\ -0.9 & -1.2 \\ 4.8 & 0.6 \\ \end{array}\right)\f$ // \n \subsection matrix_operations_initializer_list_assignment Initializer List Assignment // // Alternatively, it is possible to directly assign an initializer list to a dense matrix: \code blaze::DynamicMatrix<double> M; M = { { 3.1, 6.4 }, { -0.9, -1.2 }, { 4.8, 0.6 } }; \endcode // \n \subsection matrix_operations_copy_assignment Copy Assignment // // All kinds of matrices can be assigned to each other. The only restriction is that since a // \c StaticMatrix cannot change its size, the assigned matrix must match both in the number of // rows and in the number of columns. \code blaze::StaticMatrix<int,3UL,2UL,rowMajor> M1; blaze::DynamicMatrix<int,rowMajor> M2( 3UL, 2UL ); blaze::DynamicMatrix<float,rowMajor> M3( 5UL, 2UL ); blaze::CompressedMatrix<int,rowMajor> M4( 3UL, 2UL ); blaze::CompressedMatrix<float,columnMajor> M5( 3UL, 2UL ); // ...
Initialization of the matrices M1 = M2; // OK: Assignment of a 3x2 dense row-major matrix to another 3x2 dense row-major matrix M1 = M4; // OK: Assignment of a 3x2 sparse row-major matrix to a 3x2 dense row-major matrix M1 = M3; // Runtime error: Cannot assign a 5x2 matrix to a 3x2 static matrix M1 = M5; // OK: Assignment of a 3x2 sparse column-major matrix to a 3x2 dense row-major matrix \endcode // \n \subsection matrix_operations_compound_assignment Compound Assignment // // Compound assignment is also available for matrices: addition assignment, subtraction assignment, // and multiplication assignment. In contrast to plain assignment, however, the number of rows // and columns of the two operands have to match according to the arithmetic operation. \code blaze::StaticMatrix<int,2UL,3UL,rowMajor> M1; blaze::DynamicMatrix<int,rowMajor> M2( 2UL, 3UL ); blaze::CompressedMatrix<float,columnMajor> M3( 2UL, 3UL ); blaze::CompressedMatrix<float,rowMajor> M4( 2UL, 4UL ); blaze::StaticMatrix<float,2UL,4UL,rowMajor> M5; blaze::CompressedMatrix<float,rowMajor> M6( 3UL, 2UL ); // ... Initialization of the matrices M1 += M2; // OK: Addition assignment between two row-major matrices of the same dimensions M1 -= M3; // OK: Subtraction assignment between a row-major and a column-major matrix M1 += M4; // Runtime error: No compound assignment between matrices of different size M1 -= M5; // Compilation error: No compound assignment between matrices of different size M2 *= M6; // OK: Multiplication assignment between two row-major matrices \endcode // Note that the multiplication assignment potentially changes the number of columns of the // target matrix: \f$\left(\begin{array}{*{3}{c}} 2 & 0 & 1 \\ 0 & 3 & 2 \\ \end{array}\right) \times \left(\begin{array}{*{2}{c}} 4 & 0 \\ 1 & 0 \\ 0 & 3 \\ \end{array}\right) = \left(\begin{array}{*{2}{c}} 8 & 3 \\ 3 & 6 \\ \end{array}\right)\f$ // Since a \c StaticMatrix cannot change its size, only a square StaticMatrix can be used in a // multiplication assignment with other square matrices of the same dimensions. // // // \n \section matrix_operations_element_access Element Access // <hr> // // The easiest way to access a specific dense or sparse matrix element is via the function call // operator. The indices to access a matrix are zero-based: \code blaze::DynamicMatrix<int> M1( 4UL, 6UL ); M1(0,0) = 1; M1(0,1) = 3; // ... blaze::CompressedMatrix<double> M2( 5UL, 3UL ); M2(0,2) = 4.1; M2(1,1) = -6.3; \endcode // Since dense matrices allocate enough memory for all contained elements, using the function // call operator on a dense matrix directly returns a reference to the accessed value. In case // of a sparse matrix, if the accessed value is currently not contained in the matrix, the // value is inserted into the matrix prior to returning a reference to the value, which can // be much more expensive than the direct access to a dense matrix. Consider the following // example: \code blaze::CompressedMatrix<int> M1( 4UL, 4UL ); for( size_t i=0UL; i<M1.rows(); ++i ) { for( size_t j=0UL; j<M1.columns(); ++j ) { ... = M1(i,j); } } \endcode // Although the compressed matrix is only used for read access within the for loop, using the // function call operator temporarily inserts 16 non-zero elements into the matrix. Therefore, // all matrices (sparse as well as dense) offer an alternate way via the \c begin(), \c cbegin(), // \c end() and \c cend() functions to traverse all contained elements by iterator.
Note that // it is not possible to traverse all elements of the matrix, but that it is only possible to // traverse elements in a row/column-wise fashion. In case of a non-const matrix, \c begin() and // \c end() return an \c Iterator, which allows a manipulation of the non-zero value, in case of // a constant matrix or in case \c cbegin() or \c cend() are used a \c ConstIterator is returned: \code using blaze::CompressedMatrix; CompressedMatrix<int,rowMajor> A( 4UL, 6UL ); // Traversing the matrix by Iterator for( size_t i=0UL; i<A.rows(); ++i ) { for( CompressedMatrix<int,rowMajor>::Iterator it=A.begin(i); it!=A.end(i); ++it ) { it->value() = ...; // OK: Write access to the value of the non-zero element. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the non-zero element. } } // Traversing the matrix by ConstIterator for( size_t i=0UL; i<A.rows(); ++i ) { for( CompressedMatrix<int,rowMajor>::ConstIterator it=A.cbegin(i); it!=A.cend(i); ++it ) { it->value() = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the non-zero element. } } \endcode // Note that \c begin(), \c cbegin(), \c end(), and \c cend() are also available as free functions: \code for( size_t i=0UL; i<A.rows(); ++i ) { for( CompressedMatrix<int,rowMajor>::Iterator it=begin( A, i ); it!=end( A, i ); ++it ) { // ... } } for( size_t i=0UL; i<A.rows(); ++i ) { for( CompressedMatrix<int,rowMajor>::ConstIterator it=cbegin( A, i ); it!=cend( A, i ); ++it ) { // ... } } \endcode // \n \section matrix_operations_element_insertion Element Insertion // <hr> // // Whereas a dense matrix always provides enough capacity to store all matrix elements, a sparse // matrix only stores the non-zero elements. Therefore it is necessary to explicitly add elements // to the matrix. The first possibility to add elements to a sparse matrix is the function call // operator: \code using blaze::CompressedMatrix; CompressedMatrix<int> M1( 3UL, 4UL ); M1(1,2) = 9; \endcode // In case the element at the given position is not yet contained in the sparse matrix, it is // automatically inserted. Otherwise the old value is replaced by the new value 9. The operator // returns a reference to the sparse matrix element.\n // An alternative is the \c set() function: In case the element is not yet contained in the matrix // the element is inserted, else the element's value is modified: \code // Insert or modify the value at position (2,0) M1.set( 2, 0, 1 ); \endcode // However, insertion of elements can be better controlled via the \c insert() function. In // contrast to the function call operator and the \c set() function it emits an exception in case // the element is already contained in the matrix. In order to check for this case, the \c find() // function can be used: \code // In case the element at position (2,3) is not yet contained in the matrix it is inserted // with a value of 4. if( M1.find( 2, 3 ) == M1.end( 2 ) ) M1.insert( 2, 3, 4 ); \endcode // Although the \c insert() function is very flexible, due to performance reasons it is not // suited for the setup of large sparse matrices.
A very efficient, yet also very low-level // way to fill a sparse matrix is the \c append() function. It requires the sparse matrix to // provide enough capacity to insert a new element in the specified row. Additionally, the // index of the new element must be larger than the index of the previous element in the same // row. Violating these conditions results in undefined behavior! \code M1.reserve( 0, 3 ); // Reserving space for three non-zero elements in row 0 M1.append( 0, 1, 2 ); // Appending the element 2 in row 0 at column index 1 M1.append( 0, 2, -4 ); // Appending the element -4 in row 0 at column index 2 // ... \endcode // The most efficient way to fill a sparse matrix with elements, however, is a combination of // \c reserve(), \c append(), and the \c finalize() function: \code blaze::CompressedMatrix<int> M1( 3UL, 5UL ); M1.reserve( 3 ); // Reserving enough space for 3 non-zero elements M1.append( 0, 1, 1 ); // Appending the value 1 in row 0 with column index 1 M1.finalize( 0 ); // Finalizing row 0 M1.append( 1, 1, 2 ); // Appending the value 2 in row 1 with column index 1 M1.finalize( 1 ); // Finalizing row 1 M1.append( 2, 0, 3 ); // Appending the value 3 in row 2 with column index 0 M1.finalize( 2 ); // Finalizing row 2 \endcode // \n \section matrix_operations_member_functions Member Functions // <hr> // // \subsection matrix_operations_rows .rows() // // The current number of rows of a matrix can be acquired via the \c rows() member function: \code // Instantiating a dynamic matrix with 10 rows and 8 columns blaze::DynamicMatrix<int> M1( 10UL, 8UL ); M1.rows(); // Returns 10 // Instantiating a compressed matrix with 8 rows and 12 columns blaze::CompressedMatrix<double> M2( 8UL, 12UL ); M2.rows(); // Returns 8 \endcode // Alternatively, the free function \c rows() can be used to query the current number of rows of // a matrix. In contrast to the member function, the free function can also be used to query the // number of rows of a matrix expression: \code rows( M1 ); // Returns 10, i.e. has the same effect as the member function rows( M2 ); // Returns 8, i.e. has the same effect as the member function rows( M1 * M2 ); // Returns 10, i.e. the number of rows of the resulting matrix \endcode // \n \subsection matrix_operations_columns .columns() // // The current number of columns of a matrix can be acquired via the \c columns() member function: \code // Instantiating a dynamic matrix with 6 rows and 8 columns blaze::DynamicMatrix<int> M1( 6UL, 8UL ); M1.columns(); // Returns 8 // Instantiating a compressed matrix with 8 rows and 7 columns blaze::CompressedMatrix<double> M2( 8UL, 7UL ); M2.columns(); // Returns 7 \endcode // There is also a free function \c columns() available, which can also be used to query the number // of columns of a matrix expression: \code columns( M1 ); // Returns 8, i.e. has the same effect as the member function columns( M2 ); // Returns 7, i.e. has the same effect as the member function columns( M1 * M2 ); // Returns 7, i.e. the number of columns of the resulting matrix \endcode // \n \subsection matrix_operations_capacity .capacity() // // The \c capacity() member function returns the internal capacity of a dense or sparse matrix. // Note that the capacity of a matrix doesn't have to be equal to the size of a matrix. In case of // a dense matrix the capacity will always be greater than or equal to the total number of elements // of the matrix. In case of a sparse matrix, the capacity will usually be much less than the // total number of elements.
\code blaze::DynamicMatrix<float> M1( 5UL, 7UL ); blaze::StaticMatrix<float,7UL,4UL> M2; M1.capacity(); // Returns at least 35 M2.capacity(); // Returns at least 28 \endcode // There is also a free function \c capacity() available to query the capacity. However, please // note that this function cannot be used to query the capacity of a matrix expression: \code capacity( M1 ); // Returns at least 35, i.e. has the same effect as the member function capacity( M2 ); // Returns at least 28, i.e. has the same effect as the member function capacity( M1 * M2 ); // Compilation error! \endcode // \n \subsection matrix_operations_nonzeros .nonZeros() // // For both dense and sparse matrices the current number of non-zero elements can be queried // via the \c nonZeros() member function. In case of matrices there are two flavors of the // \c nonZeros() function: One returns the total number of non-zero elements in the matrix, // the second returns the number of non-zero elements in a specific row (in case of a row-major // matrix) or column (in case of a column-major matrix). Sparse matrices directly return their // number of non-zero elements, dense matrices traverse their elements and count the number of // non-zero elements. \code blaze::DynamicMatrix<int,rowMajor> M1( 3UL, 5UL ); // ... Initializing the dense matrix M1.nonZeros(); // Returns the total number of non-zero elements in the dense matrix M1.nonZeros( 2 ); // Returns the number of non-zero elements in row 2 \endcode \code blaze::CompressedMatrix<double,columnMajor> M2( 4UL, 7UL ); // ... Initializing the sparse matrix M2.nonZeros(); // Returns the total number of non-zero elements in the sparse matrix M2.nonZeros( 3 ); // Returns the number of non-zero elements in column 3 \endcode // The free \c nonZeros() function can also be used to query the number of non-zero elements in a // matrix expression. However, the result is not the exact number of non-zero elements, but may be // a rough estimation: \code nonZeros( M1 ); // Has the same effect as the member function nonZeros( M1, 2 ); // Has the same effect as the member function nonZeros( M2 ); // Has the same effect as the member function nonZeros( M2, 3 ); // Has the same effect as the member function nonZeros( M1 * M2 ); // Estimates the number of non-zero elements in the matrix expression \endcode // \n \subsection matrix_operations_resize_reserve .resize() / .reserve() // // The dimensions of a \c StaticMatrix are fixed at compile time by the second and third template // parameter and a \c CustomMatrix cannot be resized. In contrast, the number of rows and columns // of \c DynamicMatrix, \c HybridMatrix, and \c CompressedMatrix can be changed at runtime: \code using blaze::DynamicMatrix; using blaze::CompressedMatrix; DynamicMatrix<int,rowMajor> M1; CompressedMatrix<int,columnMajor> M2( 3UL, 2UL ); // Adapting the number of rows and columns via the resize() function. The (optional) // third parameter specifies whether the existing elements should be preserved. M1.resize( 2UL, 2UL ); // Resizing matrix M1 to 2x2 elements. Elements of built-in type // remain uninitialized, elements of class type are default // constructed. M1.resize( 3UL, 1UL, false ); // Resizing M1 to 3x1 elements. The old elements are lost, the // new elements are NOT initialized! M2.resize( 5UL, 7UL, true ); // Resizing M2 to 5x7 elements. The old elements are preserved. M2.resize( 3UL, 2UL, false ); // Resizing M2 to 3x2 elements. The old elements are lost.
\endcode // Note that resizing a matrix invalidates all existing views (see e.g. \ref views_submatrices) // on the matrix: \code typedef blaze::DynamicMatrix<int,rowMajor> MatrixType; typedef blaze::Row<MatrixType> RowType; MatrixType M1( 10UL, 20UL ); // Creating a 10x20 matrix RowType row8 = row( M1, 8UL ); // Creating a view on the 8th row of the matrix M1.resize( 6UL, 20UL ); // Resizing the matrix invalidates the view \endcode // When the internal capacity of a matrix is no longer sufficient, the allocation of a larger // chunk of memory is triggered. In order to avoid frequent reallocations, the \c reserve() // function can be used up front to set the internal capacity: \code blaze::DynamicMatrix<int> M1; M1.reserve( 100 ); M1.rows(); // Returns 0 M1.capacity(); // Returns at least 100 \endcode // Additionally it is possible to reserve memory in a specific row (for a row-major matrix) or // column (for a column-major matrix): \code blaze::CompressedMatrix<int> M1( 4UL, 6UL ); M1.reserve( 1, 4 ); // Reserving enough space for four non-zero elements in row 1 \endcode // \n \section matrix_operations_free_functions Free Functions // <hr> // // \subsection matrix_operations_reset_clear reset() / clear() // // In order to reset all elements of a dense or sparse matrix, the \c reset() function can be // used. The number of rows and columns of the matrix are preserved: \code // Setting up a single precision row-major matrix, whose elements are initialized with 2.0F. blaze::DynamicMatrix<float> M1( 4UL, 5UL, 2.0F ); // Resetting all elements to 0.0F. reset( M1 ); // Resetting all elements M1.rows(); // Returns 4: size and capacity remain unchanged \endcode // Alternatively, only a single row or column of the matrix can be reset: \code blaze::DynamicMatrix<int,blaze::rowMajor> M1( 7UL, 6UL, 5 ); // Setup of a row-major matrix blaze::DynamicMatrix<int,blaze::columnMajor> M2( 4UL, 5UL, 4 ); // Setup of a column-major matrix reset( M1, 2UL ); // Resetting the 2nd row of the row-major matrix reset( M2, 3UL ); // Resetting the 3rd column of the column-major matrix \endcode // In order to reset a row of a column-major matrix or a column of a row-major matrix, use a // row or column view (see \ref views_rows and \ref views_columns). // // In order to return a matrix to its default state (i.e. the state of a default constructed // matrix), the \c clear() function can be used: \code // Setting up a single precision row-major matrix, whose elements are initialized with 2.0F. blaze::DynamicMatrix<float> M1( 4UL, 5UL, 2.0F ); // Clearing the matrix to its default state. clear( M1 ); // Resetting the entire matrix M1.rows(); // Returns 0: size is reset, but capacity remains unchanged \endcode // \n \subsection matrix_operations_isnan isnan() // // The \c isnan() function provides the means to check a dense or sparse matrix for not-a-number // elements: \code blaze::DynamicMatrix<double> A( 3UL, 4UL ); // ... Initialization if( isnan( A ) ) { ... } \endcode \code blaze::CompressedMatrix<double> A( 3UL, 4UL ); // ... Initialization if( isnan( A ) ) { ... } \endcode // If at least one element of the matrix is not-a-number, the function returns \c true, otherwise // it returns \c false. Please note that this function only works for matrices with floating point // elements. The attempt to use it for a matrix with a non-floating point element type results in // a compile time error.
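// A typical usage pattern is to validate a matrix before it is used any further (a small
// sketch; the origin of the data is hypothetical):

   \code
   blaze::DynamicMatrix<double> A( 3UL, 4UL );
   // ... Filling the matrix with data from an external source

   if( isnan( A ) ) {
      reset( A );  // Discarding the data in case any element is not-a-number
   }
   \endcode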
// // // \n \subsection matrix_operations_isdefault isDefault() // // The \c isDefault() function returns whether the given dense or sparse matrix is in default state: \code blaze::HybridMatrix<int,5UL,4UL> A; // ... Resizing and initialization if( isDefault( A ) ) { ... } \endcode // A matrix is in default state if it appears to just have been default constructed. All resizable // matrices (\c HybridMatrix, \c DynamicMatrix, or \c CompressedMatrix) and \c CustomMatrix are in // default state if their size is equal to zero. A non-resizable matrix (\c StaticMatrix and all // submatrices) is in default state if all its elements are in default state. For instance, in case // the matrix is instantiated for a built-in integral or floating point data type, the function // returns \c true in case all matrix elements are 0 and \c false in case any matrix element is // not 0. // // // \n \subsection matrix_operations_isSquare isSquare() // // Whether a dense or sparse matrix is a square matrix (i.e. if the number of rows is equal to the // number of columns) can be checked via the \c isSquare() function: \code blaze::DynamicMatrix<double> A; // ... Resizing and initialization if( isSquare( A ) ) { ... } \endcode // \n \subsection matrix_operations_issymmetric isSymmetric() // // Via the \c isSymmetric() function it is possible to check whether a dense or sparse matrix // is symmetric: \code blaze::DynamicMatrix<float> A; // ... Resizing and initialization if( isSymmetric( A ) ) { ... } \endcode // Note that non-square matrices are never considered to be symmetric! // // // \n \subsection matrix_operations_isUniform isUniform() // // In order to check if all matrix elements are identical, the \c isUniform function can be used: \code blaze::DynamicMatrix<int> A; // ... Resizing and initialization if( isUniform( A ) ) { ... } \endcode // Note that in case of a sparse matrix the zero elements are also taken into account! // // // \n \subsection matrix_operations_islower isLower() // // Via the \c isLower() function it is possible to check whether a dense or sparse matrix is // lower triangular: \code blaze::DynamicMatrix<float> A; // ... Resizing and initialization if( isLower( A ) ) { ... } \endcode // Note that non-square matrices are never considered to be lower triangular! // // // \n \subsection matrix_operations_isunilower isUniLower() // // Via the \c isUniLower() function it is possible to check whether a dense or sparse matrix is // lower unitriangular: \code blaze::DynamicMatrix<float> A; // ... Resizing and initialization if( isUniLower( A ) ) { ... } \endcode // Note that non-square matrices are never considered to be lower unitriangular! // // // \n \subsection matrix_operations_isstrictlylower isStrictlyLower() // // Via the \c isStrictlyLower() function it is possible to check whether a dense or sparse matrix // is strictly lower triangular: \code blaze::DynamicMatrix<float> A; // ... Resizing and initialization if( isStrictlyLower( A ) ) { ... } \endcode // Note that non-square matrices are never considered to be strictly lower triangular! // // // \n \subsection matrix_operations_isUpper isUpper() // // Via the \c isUpper() function it is possible to check whether a dense or sparse matrix is // upper triangular: \code blaze::DynamicMatrix<float> A; // ... Resizing and initialization if( isUpper( A ) ) { ... } \endcode // Note that non-square matrices are never considered to be upper triangular!
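// Returning to the \c isUniform() note from above: since the zero elements of a sparse matrix
// are taken into account as well, a sparse matrix whose explicitly stored elements all share the
// same non-zero value is still not uniform (a small sketch with hypothetical values):

   \code
   blaze::CompressedMatrix<int> U( 2UL, 2UL );
   U(0,0) = 5;
   U(1,1) = 5;       // Both stored elements have the value 5 ...

   isUniform( U );   // ... but returns false, since the implicit zeros differ from 5
   \endcode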
// // // \n \subsection matrix_operations_isuniupper isUniUpper() // // Via the \c isUniUpper() function it is possible to check whether a dense or sparse matrix is // upper unitriangular: \code blaze::DynamicMatrix<float> A; // ... Resizing and initialization if( isUniUpper( A ) ) { ... } \endcode // Note that non-square matrices are never considered to be upper unitriangular! // // // \n \subsection matrix_operations_isstrictlyupper isStrictlyUpper() // // Via the \c isStrictlyUpper() function it is possible to check whether a dense or sparse matrix // is strictly upper triangular: \code blaze::DynamicMatrix<float> A; // ... Resizing and initialization if( isStrictlyUpper( A ) ) { ... } \endcode // Note that non-square matrices are never considered to be strictly upper triangular! // // // \n \subsection matrix_operations_isdiagonal isDiagonal() // // The \c isDiagonal() function checks if the given dense or sparse matrix is a diagonal matrix, // i.e. if it has only elements on its diagonal and if the non-diagonal elements are default // elements: \code blaze::CompressedMatrix<float> A; // ... Resizing and initialization if( isDiagonal( A ) ) { ... } \endcode // Note that non-square matrices are never considered to be diagonal! // // // \n \subsection matrix_operations_isidentity isIdentity() // // The \c isIdentity() function checks if the given dense or sparse matrix is an identity matrix, // i.e. if all diagonal elements are 1 and all non-diagonal elements are 0: \code blaze::CompressedMatrix<float> A; // ... Resizing and initialization if( isIdentity( A ) ) { ... } \endcode // Note that non-square matrices are never considered to be identity matrices! // // // \n \subsection matrix_operations_min_max min() / max() // // The \c min() and the \c max() functions return the smallest and largest element of the given // dense or sparse matrix, respectively: \code blaze::StaticMatrix<int,2UL,3UL,rowMajor> A{ { -5, 2, 7 }, { 4, 0, 1 } }; blaze::StaticMatrix<int,2UL,3UL,rowMajor> B{ { -5, 2, -7 }, { -4, 0, -1 } }; min( A ); // Returns -5 min( B ); // Returns -7 max( A ); // Returns 7 max( B ); // Returns 2 \endcode // In case the matrix currently has 0 rows or 0 columns, both functions return 0. Additionally, in // case a given sparse matrix is not completely filled, the zero elements are taken into account. // For example: the following compressed matrix has only 2 non-zero elements. However, the minimum // of this matrix is 0: \code blaze::CompressedMatrix<int> C( 2UL, 3UL ); C(0,0) = 1; C(0,2) = 3; min( C ); // Returns 0 \endcode // Also note that the \c min() and \c max() functions can be used to compute the smallest and // largest element of a matrix expression: \code min( A + B + C ); // Returns -9, i.e. the smallest value of the resulting matrix max( A - B - C ); // Returns 11, i.e. the largest value of the resulting matrix \endcode // \n \subsection matrix_operators_abs abs() // // The \c abs() function can be used to compute the absolute values of each element of a matrix. 
// For instance, the following computation \code blaze::StaticMatrix<int,2UL,3UL,rowMajor> A{ { -1, 2, -3 }, { 4, -5, 6 } }; blaze::StaticMatrix<int,2UL,3UL,rowMajor> B( abs( A ) ); \endcode // results in the matrix \f$ B = \left(\begin{array}{*{3}{c}} 1 & 2 & 3 \\ 4 & 5 & 6 \\ \end{array}\right)\f$ // \n \subsection matrix_operators_rounding_functions floor() / ceil() / trunc() / round() // // The \c floor(), \c ceil(), \c trunc(), and \c round() functions can be used to round down, // round up, truncate, and round each element of a matrix, respectively: \code blaze::StaticMatrix<double,3UL,3UL> A, B; B = floor( A ); // Rounding down each element of the matrix B = ceil ( A ); // Rounding up each element of the matrix B = trunc( A ); // Truncating each element of the matrix B = round( A ); // Rounding each element of the matrix \endcode // \n \subsection matrix_operators_conj conj() // // The \c conj() function can be applied on a dense or sparse matrix to compute the complex // conjugate of each element of the matrix: \code using blaze::StaticMatrix; typedef std::complex<double> cplx; // Creating the matrix // ( (1,0) (-2,-1) ) // ( (1,1) ( 0, 1) ) StaticMatrix<cplx,2UL,2UL> A{ { cplx( 1.0, 0.0 ), cplx( -2.0, -1.0 ) }, { cplx( 1.0, 1.0 ), cplx( 0.0, 1.0 ) } }; // Computing the matrix of conjugate values // ( (1, 0) (-2, 1) ) // ( (1,-1) ( 0,-1) ) StaticMatrix<cplx,2UL,2UL> B; B = conj( A ); \endcode // Additionally, matrices can be conjugated in-place via the \c conjugate() function: \code blaze::DynamicMatrix<cplx> C( 5UL, 2UL ); conjugate( C ); // In-place conjugate operation. C = conj( C ); // Same as above \endcode // \n \subsection matrix_operators_real real() // // The \c real() function can be used on a dense or sparse matrix to extract the real part of // each element of the matrix: \code using blaze::StaticMatrix; typedef std::complex<double> cplx; // Creating the matrix // ( (1,0) (-2,-1) ) // ( (1,1) ( 0, 1) ) StaticMatrix<cplx,2UL,2UL> A{ { cplx( 1.0, 0.0 ), cplx( -2.0, -1.0 ) }, { cplx( 1.0, 1.0 ), cplx( 0.0, 1.0 ) } }; // Extracting the real part of each matrix element // ( 1 -2 ) // ( 1 0 ) StaticMatrix<double,2UL,2UL> B; B = real( A ); \endcode // \n \subsection matrix_operators_imag imag() // // The \c imag() function can be used on a dense or sparse matrix to extract the imaginary part // of each element of the matrix: \code using blaze::StaticMatrix; typedef std::complex<double> cplx; // Creating the matrix // ( (1,0) (-2,-1) ) // ( (1,1) ( 0, 1) ) StaticMatrix<cplx,2UL,2UL> A{ { cplx( 1.0, 0.0 ), cplx( -2.0, -1.0 ) }, { cplx( 1.0, 1.0 ), cplx( 0.0, 1.0 ) } }; // Extracting the imaginary part of each matrix element // ( 0 -1 ) // ( 1 1 ) StaticMatrix<double,2UL,2UL> B; B = imag( A ); \endcode // \n \subsection matrix_operators_sqrt sqrt() / invsqrt() // // Via the \c sqrt() and \c invsqrt() functions the (inverse) square root of each element of a // matrix can be computed: \code blaze::StaticMatrix<double,3UL,3UL> A, B, C; B = sqrt( A ); // Computes the square root of each element C = invsqrt( A ); // Computes the inverse square root of each element \endcode // Note that in case of sparse matrices only the non-zero elements are taken into account!
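// This is in particular relevant for \c invsqrt(), since evaluating the zero elements would
// amount to a division by zero. A small sketch with hypothetical values:

   \code
   blaze::CompressedMatrix<double> A( 2UL, 2UL );
   A(0,0) = 4.0;
   A(1,1) = 16.0;

   blaze::CompressedMatrix<double> B( invsqrt( A ) );  // Only the two non-zero elements are
                                                       // evaluated: B(0,0) = 0.5 and
                                                       // B(1,1) = 0.25; the zeros stay zero
   \endcode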
// // // \n \subsection matrix_operators_cbrt cbrt() / invcbrt() // // The \c cbrt() and \c invcbrt() functions can be used to compute the (inverse) cubic root // of each element of a matrix: \code blaze::DynamicMatrix<double> A, B, C; B = cbrt( A ); // Computes the cubic root of each element C = invcbrt( A ); // Computes the inverse cubic root of each element \endcode // Note that in case of sparse matrices only the non-zero elements are taken into account! // // // \n \subsection matrix_operators_clip clip() // // The \c clip() function can be used to restrict all elements of a matrix to a specific range: \code blaze::DynamicMatrix<double> A, B; B = clip( A, -1.0, 1.0 ); // Restrict all elements to the range [-1..1] \endcode // Note that in case of sparse matrices only the non-zero elements are taken into account! // // // \n \subsection matrix_operators_pow pow() // // The \c pow() function can be used to raise each element of a matrix to a specific exponent: \code blaze::StaticMatrix<double,3UL,3UL> A, B; B = pow( A, 1.2 ); // Raises each element to the exponent 1.2 \endcode // \n \subsection matrix_operators_exp exp() // // \c exp(), \c exp2() and \c exp10() compute the base e/2/10 exponential of each element of a // matrix, respectively: \code blaze::HybridMatrix<double,3UL,3UL> A, B; B = exp( A ); // Computes the base e exponential of each element B = exp2( A ); // Computes the base 2 exponential of each element B = exp10( A ); // Computes the base 10 exponential of each element \endcode // Note that in case of sparse matrices only the non-zero elements are taken into account! // // // \n \subsection matrix_operators_log log() / log2() / log10() // // The \c log(), \c log2() and \c log10() functions can be used to compute the natural, binary // and common logarithm of each element of a matrix: \code blaze::StaticMatrix<double,3UL,3UL> A, B; B = log( A ); // Computes the natural logarithm of each element B = log2( A ); // Computes the binary logarithm of each element B = log10( A ); // Computes the common logarithm of each element \endcode // \n \subsection matrix_operators_trigonometric_functions sin() / cos() / tan() / asin() / acos() / atan() // // The following trigonometric functions are available for both dense and sparse matrices: \code blaze::DynamicMatrix<double> A, B; B = sin( A ); // Computes the sine of each element of the matrix B = cos( A ); // Computes the cosine of each element of the matrix B = tan( A ); // Computes the tangent of each element of the matrix B = asin( A ); // Computes the inverse sine of each element of the matrix B = acos( A ); // Computes the inverse cosine of each element of the matrix B = atan( A ); // Computes the inverse tangent of each element of the matrix \endcode // Note that in case of sparse matrices only the non-zero elements are taken into account!
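// Like the other element-wise operations presented in this section, these functions can be
// freely composed within a single expression (a small sketch):

   \code
   blaze::DynamicMatrix<double> A( 3UL, 3UL ), B( 3UL, 3UL );
   // ... Initialization

   B = atan( sin( A ) + cos( A ) );  // Element-wise composition within one expression
   \endcode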
// // // \n \subsection matrix_operators_hyperbolic_functions sinh() / cosh() / tanh() / asinh() / acosh() / atanh() // // The following hyperbolic functions are available for both dense and sparse matrices: \code blaze::DynamicMatrix<double> A, B; B = sinh( A ); // Computes the hyperbolic sine of each element of the matrix B = cosh( A ); // Computes the hyperbolic cosine of each element of the matrix B = tanh( A ); // Computes the hyperbolic tangent of each element of the matrix B = asinh( A ); // Computes the inverse hyperbolic sine of each element of the matrix B = acosh( A ); // Computes the inverse hyperbolic cosine of each element of the matrix B = atanh( A ); // Computes the inverse hyperbolic tangent of each element of the matrix \endcode // \n \subsection matrix_operators_erf erf() / erfc() // // The \c erf() and \c erfc() functions compute the (complementary) error function of each // element of a matrix: \code blaze::StaticMatrix<double,3UL,3UL> A, B; B = erf( A ); // Computes the error function of each element B = erfc( A ); // Computes the complementary error function of each element \endcode // Note that in case of sparse matrices only the non-zero elements are taken into account! // // // \n \subsection matrix_operations_foreach forEach() // // Via the \c forEach() function it is possible to execute custom operations on dense and sparse // matrices. For instance, the following example demonstrates a custom square root computation via // a lambda: \code blaze::DynamicMatrix<double> A, B; B = forEach( A, []( double d ) { return std::sqrt( d ); } ); \endcode // Although the computation can be parallelized it is not vectorized and thus cannot perform at // peak performance. However, it is also possible to create vectorized custom operations. See // \ref custom_operations for a detailed overview of the possibilities of custom operations. // // // \n \subsection matrix_operations_matrix_transpose trans() // // Matrices can be transposed via the \c trans() function. Row-major matrices are transposed into // a column-major matrix and vice versa: \code blaze::DynamicMatrix<int,rowMajor> M1( 5UL, 2UL ); blaze::CompressedMatrix<int,columnMajor> M2( 3UL, 7UL ); M1 = M2; // Assigning a column-major matrix to a row-major matrix M1 = trans( M2 ); // Assigning the transpose of M2 (i.e. a row-major matrix) to M1 M1 += trans( M2 ); // Addition assignment of two row-major matrices \endcode // Additionally, matrices can be transposed in-place via the \c transpose() function: \code blaze::DynamicMatrix<int,rowMajor> M( 5UL, 2UL ); transpose( M ); // In-place transpose operation. M = trans( M ); // Same as above \endcode // Note however that the transpose operation fails if ... // // - ... the given matrix has a fixed size and is non-square; // - ... the given matrix is a triangular matrix; // - ... the given submatrix affects the restricted parts of a triangular matrix; // - ... the given submatrix would cause non-deterministic results in a symmetric/Hermitian matrix. 
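// The first of these cases can be illustrated with a short sketch (since a fixed size 2x3
// matrix cannot hold its own 3x2 transpose, the in-place operation must fail):

   \code
   blaze::StaticMatrix<int,2UL,3UL> A;

   transpose( A );  // Fails: a fixed size 2x3 matrix cannot be transposed in-place
   \endcode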
// // // \n \subsection matrix_operations_conjugate_transpose ctrans() // // The conjugate transpose of a dense or sparse matrix (also called adjoint matrix, Hermitian // conjugate, or transjugate) can be computed via the \c ctrans() function: \code blaze::DynamicMatrix< complex<float>, rowMajor > M1( 5UL, 2UL ); blaze::CompressedMatrix< complex<float>, columnMajor > M2( 2UL, 5UL ); M1 = ctrans( M2 ); // Compute the conjugate transpose matrix \endcode // Note that the \c ctrans() function has the same effect as manually applying the \c conj() and // \c trans() functions in any order: \code M1 = trans( conj( M2 ) ); // Computing the conjugate transpose matrix M1 = conj( trans( M2 ) ); // Computing the conjugate transpose matrix \endcode // The \c ctranspose() function can be used to perform an in-place conjugate transpose operation: \code blaze::DynamicMatrix<int,rowMajor> M( 5UL, 2UL ); ctranspose( M ); // In-place conjugate transpose operation. M = ctrans( M ); // Same as above \endcode // Note, however, that the conjugate transpose operation fails if ... // // - ... the given matrix has a fixed size and is non-square; // - ... the given matrix is a triangular matrix; // - ... the given submatrix affects the restricted parts of a triangular matrix; // - ... the given submatrix would cause non-deterministic results in a symmetric/Hermitian matrix. // // // \n \subsection matrix_operations_matrix_determinant det() // // The determinant of a square dense matrix can be computed by means of the \c det() function: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization double d = det( A ); // Compute the determinant of A \endcode // In case the given dense matrix is not a square matrix, a \c std::invalid_argument exception is // thrown. // // \note The \c det() function can only be used for dense matrices with \c float, \c double, // \c complex<float> or \c complex<double> element type. The attempt to call the function with // matrices of any other element type or with a sparse matrix results in a compile time error! // // \note The function depends on LAPACK kernels. Thus the function can only be used if the // fitting LAPACK library is available and linked to the executable. Otherwise a linker error // will be created. // // // \n \subsection matrix_operations_swap swap() // // Via the \c swap() function it is possible to completely swap the contents of two matrices // of the same type: \code blaze::DynamicMatrix<int,blaze::rowMajor> M1( 10UL, 15UL ); blaze::DynamicMatrix<int,blaze::rowMajor> M2( 20UL, 10UL ); swap( M1, M2 ); // Swapping the contents of M1 and M2 \endcode // \n \section matrix_operations_matrix_inversion Matrix Inversion // <hr> // // The inverse of a square dense matrix can be computed via the \c inv() function: \code blaze::DynamicMatrix<float,blaze::rowMajor> A, B; // ... Resizing and initialization B = inv( A ); // Compute the inverse of A \endcode // Alternatively, an in-place inversion of a dense matrix can be performed via the \c invert() // function: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization invert( A ); // In-place matrix inversion \endcode // Both the \c inv() and the \c invert() functions will automatically select the most suited matrix // inversion algorithm depending on the size and type of the given matrix. For small matrices of // up to 6x6, both functions use manually optimized kernels for maximum performance.
For matrices // larger than 6x6 the inversion is performed by means of the most suited matrix decomposition // method: In case of a general matrix the LU decomposition is used, for symmetric matrices the // LDLT decomposition is applied, for Hermitian matrices the LDLH decomposition is performed, and // for triangular matrices the inverse is computed via a forward or back substitution. // // In case the type of the matrix does not provide additional compile time information about its // structure (symmetric, lower, upper, diagonal, ...), the information can be provided manually // when calling the \c invert() function: \code using blaze::asGeneral; using blaze::asSymmetric; using blaze::asHermitian; using blaze::asLower; using blaze::asUniLower; using blaze::asUpper; using blaze::asUniUpper; using blaze::asDiagonal; invert<asGeneral> ( A ); // In-place inversion of a general matrix invert<asSymmetric>( A ); // In-place inversion of a symmetric matrix invert<asHermitian>( A ); // In-place inversion of a Hermitian matrix invert<asLower> ( A ); // In-place inversion of a lower triangular matrix invert<asUniLower> ( A ); // In-place inversion of a lower unitriangular matrix invert<asUpper> ( A ); // In-place inversion of an upper triangular matrix invert<asUniUpper> ( A ); // In-place inversion of an upper unitriangular matrix invert<asDiagonal> ( A ); // In-place inversion of a diagonal matrix \endcode // Alternatively, via the \c invert() function it is possible to explicitly specify the inversion // algorithm: \code using blaze::byLU; using blaze::byLDLT; using blaze::byLDLH; using blaze::byLLH; // In-place inversion of a general matrix by means of an LU decomposition invert<byLU>( A ); // In-place inversion of a symmetric indefinite matrix by means of a Bunch-Kaufman decomposition invert<byLDLT>( A ); // In-place inversion of a Hermitian indefinite matrix by means of a Bunch-Kaufman decomposition invert<byLDLH>( A ); // In-place inversion of a positive definite matrix by means of a Cholesky decomposition invert<byLLH>( A ); \endcode // Whereas the inversion by means of an LU decomposition works for every general square matrix, // the inversion by LDLT only works for symmetric indefinite matrices, the inversion by LDLH is // restricted to Hermitian indefinite matrices and the Cholesky decomposition (LLH) only works // for Hermitian positive definite matrices. Please note that it is the responsibility of the // function caller to guarantee that the selected algorithm is suited for the given matrix. In // case this precondition is violated the result can be wrong and might not represent the inverse // of the given matrix! // // For both the \c inv() and \c invert() function the matrix inversion fails if ... // // - ... the given matrix is not a square matrix; // - ... the given matrix is singular and not invertible. // // In all failure cases either a compilation error is created if the failure can be predicted at // compile time or a \c std::invalid_argument exception is thrown. // // \note The matrix inversion can only be used for dense matrices with \c float, \c double, // \c complex<float> or \c complex<double> element type. The attempt to call the function with // matrices of any other element type or with a sparse matrix results in a compile time error! // // \note The functions invert the dense matrix by means of LAPACK kernels. Thus the functions can // only be used if the fitting LAPACK library is available and linked to the executable. Otherwise // a linker error will be created.
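//
// The runtime failure case can be guarded like any other \b Blaze exception; a minimal sketch
// (the all-zero matrix merely serves as a convenient singular input):
\code
blaze::DynamicMatrix<double> A( 3UL, 3UL, 0.0 );  // 3x3 zero matrix, hence singular

try {
   invert( A );  // Fails: A is singular and not invertible
}
catch( std::invalid_argument& ex ) {
   // ... Error handling
}
\endcode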
// // \note It is not possible to use any kind of view on the expression object returned by the // \c inv() function. Also, it is not possible to access individual elements via the function call // operator on the expression object: \code row( inv( A ), 2UL ); // Compilation error: Views cannot be used on an inv() expression! inv( A )(1,2); // Compilation error: It is not possible to access individual elements! \endcode // \note The inversion functions do not provide any exception safety guarantee, i.e. in case an // exception is thrown the matrix may already have been modified. // // // \n \section matrix_operations_decomposition Matrix Decomposition // <hr> // // \note All decomposition functions can only be used for dense matrices with \c float, \c double, // \c complex<float> or \c complex<double> element type. The attempt to call the function with // matrices of any other element type or with a sparse matrix results in a compile time error! // // \note The functions decompose a dense matrix by means of LAPACK kernels. Thus the functions can // only be used if the fitting LAPACK library is available and linked to the executable. Otherwise // a linker error will be created. // // \subsection matrix_operations_decomposition_lu LU Decomposition // // The LU decomposition of a dense matrix can be computed via the \c lu() function: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization blaze::DynamicMatrix<double,blaze::rowMajor> L, U, P; lu( A, L, U, P ); // LU decomposition of a row-major matrix assert( A == L * U * P ); \endcode \code blaze::DynamicMatrix<double,blaze::columnMajor> A; // ... Resizing and initialization blaze::DynamicMatrix<double,blaze::columnMajor> L, U, P; lu( A, L, U, P ); // LU decomposition of a column-major matrix assert( A == P * L * U ); \endcode // The function works for both \c rowMajor and \c columnMajor matrices. Note, however, that the // three matrices \c A, \c L and \c U are required to have the same storage order. Also, please // note that the way the permutation matrix \c P needs to be applied differs between row-major and // column-major matrices, since the algorithm uses column interchanges for row-major matrices and // row interchanges for column-major matrices. // // Furthermore, \c lu() can be used with adaptors. For instance, the following example demonstrates // the LU decomposition of a symmetric matrix into a lower and upper triangular matrix: \code blaze::SymmetricMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > A; // ... Resizing and initialization blaze::LowerMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > L; blaze::UpperMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > U; blaze::DynamicMatrix<double,blaze::columnMajor> P; lu( A, L, U, P ); // LU decomposition of A \endcode // \n \subsection matrix_operations_decomposition_llh Cholesky Decomposition // // The Cholesky (LLH) decomposition of a dense matrix can be computed via the \c llh() function: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization blaze::DynamicMatrix<double,blaze::rowMajor> L; llh( A, L ); // LLH decomposition of a row-major matrix assert( A == L * ctrans( L ) ); \endcode // The function works for both \c rowMajor and \c columnMajor matrices and the two matrices \c A // and \c L can have any storage order. // // Furthermore, \c llh() can be used with adaptors. 
For instance, the following example demonstrates // the LLH decomposition of a symmetric matrix into a lower triangular matrix: \code blaze::SymmetricMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > A; // ... Resizing and initialization blaze::LowerMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > L; llh( A, L ); // Cholesky decomposition of A \endcode // \n \subsection matrix_operations_decomposition_qr QR Decomposition // // The QR decomposition of a dense matrix can be computed via the \c qr() function: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization blaze::DynamicMatrix<double,blaze::columnMajor> Q; blaze::DynamicMatrix<double,blaze::rowMajor> R; qr( A, Q, R ); // QR decomposition of a row-major matrix assert( A == Q * R ); \endcode // The function works for both \c rowMajor and \c columnMajor matrices and the three matrices // \c A, \c Q and \c R can have any storage order. // // Furthermore, \c qr() can be used with adaptors. For instance, the following example demonstrates // the QR decomposition of a symmetric matrix into a general matrix and an upper triangular matrix: \code blaze::SymmetricMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > A; // ... Resizing and initialization blaze::DynamicMatrix<double,blaze::rowMajor> Q; blaze::UpperMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > R; qr( A, Q, R ); // QR decomposition of A \endcode // \n \subsection matrix_operations_decomposition_rq RQ Decomposition // // Similar to the QR decomposition, the RQ decomposition of a dense matrix can be computed via // the \c rq() function: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization blaze::DynamicMatrix<double,blaze::rowMajor> R; blaze::DynamicMatrix<double,blaze::columnMajor> Q; rq( A, R, Q ); // RQ decomposition of a row-major matrix assert( A == R * Q ); \endcode // The function works for both \c rowMajor and \c columnMajor matrices and the three matrices // \c A, \c R and \c Q can have any storage order. // // Also the \c rq() function can be used in combination with matrix adaptors. For instance, the // following example demonstrates the RQ decomposition of an Hermitian matrix into an upper // triangular matrix and a general matrix: \code blaze::HermitianMatrix< blaze::DynamicMatrix<complex<double>,blaze::columnMajor> > A; // ... Resizing and initialization blaze::UpperMatrix< blaze::DynamicMatrix<complex<double>,blaze::columnMajor> > R; blaze::DynamicMatrix<complex<double>,blaze::rowMajor> Q; rq( A, R, Q ); // RQ decomposition of A \endcode // \n \subsection matrix_operations_decomposition_ql QL Decomposition // // The QL decomposition of a dense matrix can be computed via the \c ql() function: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization blaze::DynamicMatrix<double,blaze::rowMajor> Q; blaze::DynamicMatrix<double,blaze::columnMajor> L; ql( A, Q, L ); // QL decomposition of a row-major matrix assert( A == Q * L ); \endcode // The function works for both \c rowMajor and \c columnMajor matrices and the three matrices // \c A, \c Q and \c L can have any storage order. // // Also the \c ql() function can be used in combination with matrix adaptors. For instance, the // following example demonstrates the QL decomposition of a symmetric matrix into a general // matrix and a lower triangular matrix: \code blaze::SymmetricMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > A; // ...
Resizing and initialization blaze::DynamicMatrix<double,blaze::rowMajor> Q; blaze::LowerMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > L; ql( A, Q, L ); // QL decomposition of A \endcode // \n \subsection matrix_operations_decomposition_lq LQ Decomposition // // The LQ decomposition of a dense matrix can be computed via the \c lq() function: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization blaze::DynamicMatrix<double,blaze::rowMajor> L; blaze::DynamicMatrix<double,blaze::columnMajor> Q; lq( A, L, Q ); // LQ decomposition of a row-major matrix assert( A == L * Q ); \endcode // The function works for both \c rowMajor and \c columnMajor matrices and the three matrices // \c A, \c L and \c Q can have any storage order. // // Furthermore, \c lq() can be used with adaptors. For instance, the following example demonstrates // the LQ decomposition of an Hermitian matrix into a lower triangular matrix and a general matrix: \code blaze::HermitianMatrix< blaze::DynamicMatrix<complex<double>,blaze::columnMajor> > A; // ... Resizing and initialization blaze::LowerMatrix< blaze::DynamicMatrix<complex<double>,blaze::columnMajor> > L; blaze::DynamicMatrix<complex<double>,blaze::rowMajor> Q; lq( A, L, Q ); // LQ decomposition of A \endcode // \n Previous: \ref matrix_types &nbsp; &nbsp; Next: \ref adaptors */ //************************************************************************************************* //**Adaptors*************************************************************************************** /*!\page adaptors Adaptors // // \tableofcontents // // // \section adaptors_general General Concepts // <hr> // // Adaptors act as wrappers around the general \ref matrix_types. They adapt the interface of the // matrices such that certain invariants are preserved. Due to this, adaptors can provide a compile // time guarantee of certain properties, which can be exploited for optimized performance. // // The \b Blaze library provides a total of 9 different adaptors: // // <ul> // <li> \ref adaptors_symmetric_matrices </li> // <li> \ref adaptors_hermitian_matrices </li> // <li> \ref adaptors_triangular_matrices // <ul> // <li> \ref adaptors_triangular_matrices "Lower Triangular Matrices" // <ul> // <li> \ref adaptors_triangular_matrices_lowermatrix </li> // <li> \ref adaptors_triangular_matrices_unilowermatrix </li> // <li> \ref adaptors_triangular_matrices_strictlylowermatrix </li> // </ul> // </li> // <li> \ref adaptors_triangular_matrices "Upper Triangular Matrices" // <ul> // <li> \ref adaptors_triangular_matrices_uppermatrix </li> // <li> \ref adaptors_triangular_matrices_uniuppermatrix </li> // <li> \ref adaptors_triangular_matrices_strictlyuppermatrix </li> // </ul> // </li> // <li> \ref adaptors_triangular_matrices "Diagonal Matrices" // <ul> // <li> \ref adaptors_triangular_matrices_diagonalmatrix </li> // </ul> // </li> // </ul> // </li> // </ul> // // In combination with the general matrix types, \b Blaze provides a total of 40 different matrix // types that make it possible to exactly adapt the type of matrix to every specific problem. // // // \n \section adaptors_examples Examples // <hr> // // The following code examples give an impression of the use of adaptors.
The first example shows // the multiplication between two lower matrices: \code using blaze::DynamicMatrix; using blaze::LowerMatrix; using blaze::rowMajor; using blaze::columnMajor; LowerMatrix< DynamicMatrix<double,rowMajor> > A; LowerMatrix< DynamicMatrix<double,columnMajor> > B; DynamicMatrix<double,columnMajor> C; // ... Resizing and initialization C = A * B; \endcode // When multiplying two matrices, at least one of which is triangular, \b Blaze can exploit the // fact that either the lower or upper part of the matrix contains only default elements and // restrict the algorithm to the non-zero elements. Thus the adaptor provides a significant // performance advantage in comparison to a general matrix multiplication, especially for large // matrices. // // The second example shows the \c SymmetricMatrix adaptor in a row-major dense matrix/sparse // vector multiplication: \code using blaze::DynamicMatrix; using blaze::DynamicVector; using blaze::CompressedVector; using blaze::SymmetricMatrix; using blaze::rowMajor; using blaze::columnVector; SymmetricMatrix< DynamicMatrix<double,rowMajor> > A; CompressedVector<double,columnVector> x; DynamicVector<double,columnVector> y; // ... Resizing and initialization y = A * x; \endcode // In this example it is not intuitively apparent that using a row-major matrix is not the best // possible choice in terms of performance since the computation cannot be vectorized. Choosing // a column-major matrix instead, however, would enable a vectorized computation. Therefore // \b Blaze exploits the fact that \c A is symmetric, selects the best suited storage order and // evaluates the multiplication as \code y = trans( A ) * x; \endcode // which significantly increases the performance. // // \n Previous: \ref matrix_operations &nbsp; &nbsp; Next: \ref adaptors_symmetric_matrices */ //************************************************************************************************* //**Symmetric Matrices***************************************************************************** /*!\page adaptors_symmetric_matrices Symmetric Matrices // // \tableofcontents // // // \n \section adaptors_symmetric_matrices_general Symmetric Matrices // <hr> // // In contrast to general matrices, which have no restriction in their number of rows and columns // and whose elements can have any value, symmetric matrices provide the compile time guarantee // to be square matrices with pair-wise identical values. Mathematically, this means that a // symmetric matrix is always equal to its transpose (\f$ A = A^T \f$) and that all non-diagonal // values have an identical counterpart (\f$ a_{ij} == a_{ji} \f$). This symmetry property can // be exploited to provide higher efficiency and/or lower memory consumption. Within the \b Blaze // library, symmetric matrices are realized by the \ref adaptors_symmetric_matrices_symmetricmatrix // class template. // // // \n \section adaptors_symmetric_matrices_symmetricmatrix SymmetricMatrix // <hr> // // The SymmetricMatrix class template is an adapter for existing dense and sparse matrix types. // It inherits the properties and the interface of the given matrix type \c MT and extends it // by enforcing the additional invariant of symmetry (i.e. the matrix is always equal to its // transpose \f$ A = A^T \f$).
It can be included via the header file \code #include <blaze/math/SymmetricMatrix.h> \endcode // The type of the adapted matrix can be specified via template parameter: \code template< typename MT > class SymmetricMatrix; \endcode // \c MT specifies the type of the matrix to be adapted. SymmetricMatrix can be used with any // non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. Note // that the given matrix type must be either resizable (as for instance blaze::HybridMatrix or // blaze::DynamicMatrix) or must be square at compile time (as for instance blaze::StaticMatrix). // // The following examples give an impression of several possible symmetric matrices: \code using blaze::unaligned; using blaze::unpadded; using blaze::rowMajor; using blaze::columnMajor; // Definition of a 3x3 row-major dense symmetric matrix with static memory blaze::SymmetricMatrix< blaze::StaticMatrix<int,3UL,3UL,rowMajor> > A; // Definition of a resizable column-major dense symmetric matrix based on HybridMatrix blaze::SymmetricMatrix< blaze::HybridMatrix<float,4UL,4UL,columnMajor> > B; // Definition of a resizable row-major dense symmetric matrix based on DynamicMatrix blaze::SymmetricMatrix< blaze::DynamicMatrix<double,rowMajor> > C; // Definition of a fixed size row-major dense symmetric matrix based on CustomMatrix blaze::SymmetricMatrix< blaze::CustomMatrix<double,unaligned,unpadded,rowMajor> > D; // Definition of a compressed row-major single precision symmetric matrix blaze::SymmetricMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > E; \endcode // The storage order of a symmetric matrix depends on the storage order of the adapted matrix // type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. is specified as // blaze::rowMajor), the symmetric matrix will also be a row-major matrix. Otherwise, if the // adapted matrix is column-major (i.e. is specified as blaze::columnMajor), the symmetric matrix // will also be a column-major matrix. // // // \n \section adaptors_symmetric_matrices_special_properties Special Properties of Symmetric Matrices // <hr> // // A symmetric matrix is used exactly like a matrix of the underlying, adapted matrix type \c MT. // It also provides (nearly) the same interface as the underlying matrix type. However, there are // some important exceptions resulting from the symmetry constraint: // // -# <b>\ref adaptors_symmetric_matrices_square</b> // -# <b>\ref adaptors_symmetric_matrices_symmetry</b> // -# <b>\ref adaptors_symmetric_matrices_initialization</b> // // \n \subsection adaptors_symmetric_matrices_square Symmetric Matrices Must Always be Square!
// // In case a resizable matrix is used (as for instance blaze::HybridMatrix, blaze::DynamicMatrix, // or blaze::CompressedMatrix), this means that the according constructors, the \c resize() and // the \c extend() functions only expect a single parameter, which specifies both the number of // rows and columns, instead of two (one for the number of rows and one for the number of columns): \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; using blaze::rowMajor; // Default constructed, default initialized, row-major 3x3 symmetric dynamic matrix SymmetricMatrix< DynamicMatrix<double,rowMajor> > A( 3 ); // Resizing the matrix to 5x5 A.resize( 5 ); // Extending the number of rows and columns by 2, resulting in a 7x7 matrix A.extend( 2 ); \endcode // In case a matrix with a fixed size is used (as for instance blaze::StaticMatrix), the number // of rows and number of columns must be specified equally: \code using blaze::StaticMatrix; using blaze::SymmetricMatrix; using blaze::columnMajor; // Correct setup of a fixed size column-major 3x3 symmetric static matrix SymmetricMatrix< StaticMatrix<int,3UL,3UL,columnMajor> > A; // Compilation error: the provided matrix type is not a square matrix type SymmetricMatrix< StaticMatrix<int,3UL,4UL,columnMajor> > B; \endcode // \n \subsection adaptors_symmetric_matrices_symmetry The Symmetric Property is Always Enforced! // // This means that modifying the element \f$ a_{ij} \f$ of a symmetric matrix also modifies its // counterpart element \f$ a_{ji} \f$. Also, it is only possible to assign matrices that are // symmetric themselves: \code using blaze::CompressedMatrix; using blaze::DynamicMatrix; using blaze::StaticMatrix; using blaze::SymmetricMatrix; using blaze::rowMajor; // Default constructed, row-major 3x3 symmetric compressed matrix SymmetricMatrix< CompressedMatrix<double,rowMajor> > A( 3 ); // Initializing three elements via the function call operator A(0,0) = 1.0; // Initialization of the diagonal element (0,0) A(0,2) = 2.0; // Initialization of the elements (0,2) and (2,0) // Inserting three more elements via the insert() function A.insert( 1, 1, 3.0 ); // Inserting the diagonal element (1,1) A.insert( 1, 2, 4.0 ); // Inserting the elements (1,2) and (2,1) // Access via a non-const iterator *A.begin(2UL) = 10.0; // Modifies both elements (2,0) and (0,2) // Erasing elements via the erase() function A.erase( 0, 0 ); // Erasing the diagonal element (0,0) A.erase( 0, 2 ); // Erasing the elements (0,2) and (2,0) // Construction from a symmetric dense matrix StaticMatrix<double,3UL,3UL> B{ { 3.0, 8.0, -2.0 }, { 8.0, 0.0, -1.0 }, { -2.0, -1.0, 4.0 } }; SymmetricMatrix< DynamicMatrix<double,rowMajor> > C( B ); // OK // Assignment of a non-symmetric dense matrix StaticMatrix<double,3UL,3UL> D{ { 3.0, 7.0, -2.0 }, { 8.0, 0.0, -1.0 }, { -2.0, -1.0, 4.0 } }; C = D; // Throws an exception; symmetric invariant would be violated! \endcode // The same restriction also applies to the \c append() function for sparse matrices: Appending // the element \f$ a_{ij} \f$ additionally inserts the element \f$ a_{ji} \f$ into the matrix. // Despite the additional insertion, the \c append() function still provides the most efficient // way to set up a symmetric sparse matrix.
In order to achieve the maximum efficiency, the // capacity of the individual rows/columns of the matrix should be specifically prepared with // \c reserve() calls: \code using blaze::CompressedMatrix; using blaze::SymmetricMatrix; using blaze::rowMajor; // Setup of the symmetric matrix // // ( 0 1 3 ) // A = ( 1 2 0 ) // ( 3 0 0 ) // SymmetricMatrix< CompressedMatrix<double,rowMajor> > A( 3 ); A.reserve( 5 ); // Reserving enough space for 5 non-zero elements A.reserve( 0, 2 ); // Reserving two non-zero elements in the first row A.reserve( 1, 2 ); // Reserving two non-zero elements in the second row A.reserve( 2, 1 ); // Reserving a single non-zero element in the third row A.append( 0, 1, 1.0 ); // Appending the value 1 at position (0,1) and (1,0) A.append( 1, 1, 2.0 ); // Appending the value 2 at position (1,1) A.append( 2, 0, 3.0 ); // Appending the value 3 at position (2,0) and (0,2) \endcode // The symmetry property is also enforced for symmetric custom matrices: In case the given array // of elements does not represent a symmetric matrix, a \c std::invalid_argument exception is // thrown: \code using blaze::CustomMatrix; using blaze::SymmetricMatrix; using blaze::unaligned; using blaze::unpadded; using blaze::rowMajor; typedef SymmetricMatrix< CustomMatrix<double,unaligned,unpadded,rowMajor> > CustomSymmetric; // Creating a 3x3 symmetric custom matrix from a properly initialized array double array[9] = { 1.0, 2.0, 4.0, 2.0, 3.0, 5.0, 4.0, 5.0, 6.0 }; CustomSymmetric A( array, 3UL ); // OK // Attempt to create a second 3x3 symmetric custom matrix from an uninitialized array CustomSymmetric B( new double[9UL], 3UL, blaze::ArrayDelete() ); // Throws an exception \endcode // Finally, the symmetry property is enforced for views (rows, columns, submatrices, ...) on the // symmetric matrix. The following example demonstrates that modifying the elements of an entire // row of the symmetric matrix also affects the counterpart elements in the according column of // the matrix: \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; // Setup of the symmetric matrix // // ( 0 1 0 2 ) // A = ( 1 3 4 0 ) // ( 0 4 0 5 ) // ( 2 0 5 0 ) // SymmetricMatrix< DynamicMatrix<int> > A( 4 ); A(0,1) = 1; A(0,3) = 2; A(1,1) = 3; A(1,2) = 4; A(2,3) = 5; // Setting all elements in the 1st row to 0 results in the matrix // // ( 0 0 0 2 ) // A = ( 0 0 0 0 ) // ( 0 0 0 5 ) // ( 2 0 5 0 ) // row( A, 1 ) = 0; \endcode // The next example demonstrates the (compound) assignment to submatrices of symmetric matrices. // Since the modification of element \f$ a_{ij} \f$ of a symmetric matrix also modifies the // element \f$ a_{ji} \f$, the matrix to be assigned must be structured such that the symmetry // of the symmetric matrix is preserved. Otherwise a \c std::invalid_argument exception is // thrown: \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; // Setup of two default 4x4 symmetric matrices SymmetricMatrix< DynamicMatrix<int> > A1( 4 ), A2( 4 ); // Setup of the 3x2 dynamic matrix // // ( 1 2 ) // B = ( 3 4 ) // ( 5 6 ) // DynamicMatrix<int> B{ { 1, 2 }, { 3, 4 }, { 5, 6 } }; // OK: Assigning B to a submatrix of A1 such that the symmetry can be preserved // // ( 0 0 1 2 ) // A1 = ( 0 0 3 4 ) // ( 1 3 5 6 ) // ( 2 4 6 0 ) // submatrix( A1, 0UL, 2UL, 3UL, 2UL ) = B; // OK // Error: Assigning B to a submatrix of A2 such that the symmetry cannot be preserved! // The elements marked with X cannot be assigned unambiguously!
// // ( 0 1 2 0 ) // A2 = ( 1 3 X 0 ) // ( 2 X 6 0 ) // ( 0 0 0 0 ) // submatrix( A2, 0UL, 1UL, 3UL, 2UL ) = B; // Assignment throws an exception! \endcode // \n \subsection adaptors_symmetric_matrices_initialization The Elements of a Dense Symmetric Matrix are Always Default Initialized! // // Although this results in a small loss of efficiency (especially in case all default values are // overridden afterwards), this property is important since otherwise the symmetric property of // dense symmetric matrices could not be guaranteed: \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; // Uninitialized, 5x5 row-major dynamic matrix DynamicMatrix<int,rowMajor> A( 5, 5 ); // Default initialized, 5x5 row-major symmetric dynamic matrix SymmetricMatrix< DynamicMatrix<int,rowMajor> > B( 5 ); \endcode // \n \section adaptors_symmetric_matrices_arithmetic_operations Arithmetic Operations // <hr> // // A SymmetricMatrix can participate in numerical operations in any way any other dense // or sparse matrix can participate. It can also be combined with any other dense or sparse vector // or matrix. The following code example gives an impression of the use of SymmetricMatrix within // arithmetic operations: \code using blaze::SymmetricMatrix; using blaze::DynamicMatrix; using blaze::HybridMatrix; using blaze::StaticMatrix; using blaze::CompressedMatrix; using blaze::rowMajor; using blaze::columnMajor; DynamicMatrix<float,rowMajor> A( 3, 3 ); // Row-major dense single precision 3x3 matrix CompressedMatrix<float,rowMajor> B( 3, 3 ); // Row-major sparse single precision 3x3 matrix SymmetricMatrix< DynamicMatrix<float,columnMajor> > C( 3 ); // Column-major dense symmetric matrix CompressedMatrix<float> E( 3, 3 ); // Empty row-major sparse single precision 3x3 matrix SymmetricMatrix< HybridMatrix<float,3UL,3UL,rowMajor> > F; SymmetricMatrix< StaticMatrix<float,3UL,3UL,columnMajor> > G; F = A + B; // Matrix addition and assignment to a row-major symmetric matrix G = A - C; // Matrix subtraction and assignment to a column-major symmetric matrix G = A * E; // Matrix multiplication between a dense and a sparse matrix A *= 2.0; // In-place scaling of matrix A F = 2.0 * B; // Scaling of matrix B G = E * 2.0; // Scaling of matrix E F += A - B; // Addition assignment G -= A + C; // Subtraction assignment G *= A * E; // Multiplication assignment \endcode // \n \section adaptors_symmetric_matrices_block_matrices Symmetric Block Matrices // <hr> // // It is also possible to use symmetric block matrices: \code using blaze::CompressedMatrix; using blaze::StaticMatrix; using blaze::SymmetricMatrix; // Definition of a 5x5 symmetric block matrix based on CompressedMatrix SymmetricMatrix< CompressedMatrix< StaticMatrix<int,3UL,3UL> > > A( 5 ); \endcode // Also in this case, the SymmetricMatrix class template enforces the invariant of symmetry and // guarantees that a modification of element \f$ a_{ij} \f$ of the adapted matrix is also // applied to element \f$ a_{ji} \f$: \code // Inserting the elements (2,4) and (4,2) A.insert( 2, 4, StaticMatrix<int,3UL,3UL>{ { 1, -4, 5 }, { 6, 8, -3 }, { 2, -1, 2 } } ); // Manipulating the elements (2,4) and (4,2) A(2,4)(1,1) = -5; \endcode // For more information on block matrices, see the tutorial on \ref block_vectors_and_matrices. // // // \n \section adaptors_symmetric_matrices_performance Performance Considerations // <hr> // // When the symmetric property of a matrix is known beforehand using the SymmetricMatrix adaptor // instead of a general matrix can be a considerable performance advantage. The \b Blaze library // tries to exploit the properties of symmetric matrices whenever possible. However, there are // also situations when using a symmetric matrix introduces some overhead.
The following examples // demonstrate several situations where symmetric matrices can positively or negatively impact // performance. // // \n \subsection adaptors_symmetric_matrices_matrix_matrix_multiplication Positive Impact: Matrix/Matrix Multiplication // // When multiplying two matrices, at least one of which is symmetric, \b Blaze can exploit the fact // that \f$ A = A^T \f$ and choose the fastest and most suited combination of storage orders for the // multiplication. The following example demonstrates this by means of a dense matrix/sparse matrix // multiplication: \code using blaze::DynamicMatrix; using blaze::CompressedMatrix; using blaze::SymmetricMatrix; using blaze::rowMajor; using blaze::columnMajor; SymmetricMatrix< DynamicMatrix<double,rowMajor> > A; SymmetricMatrix< CompressedMatrix<double,columnMajor> > B; DynamicMatrix<double,columnMajor> C; // ... Resizing and initialization C = A * B; \endcode // Intuitively, the chosen combination of a row-major and a column-major matrix is the most suited // for maximum performance. However, \b Blaze evaluates the multiplication as \code C = A * trans( B ); \endcode // which significantly increases the performance since in contrast to the original formulation the // optimized form can be vectorized. Therefore, in the context of matrix multiplications, using the // SymmetricMatrix adapter is obviously an advantage. // // \n \subsection adaptors_symmetric_matrices_matrix_vector_multiplication Positive Impact: Matrix/Vector Multiplication // // A similar optimization is possible in case of matrix/vector multiplications: \code using blaze::DynamicMatrix; using blaze::DynamicVector; using blaze::CompressedVector; using blaze::SymmetricMatrix; using blaze::rowMajor; using blaze::columnVector; SymmetricMatrix< DynamicMatrix<double,rowMajor> > A; CompressedVector<double,columnVector> x; DynamicVector<double,columnVector> y; // ... Resizing and initialization y = A * x; \endcode // In this example it is not intuitively apparent that using a row-major matrix is not the best // possible choice in terms of performance since the computation cannot be vectorized. Choosing // a column-major matrix instead, however, would enable a vectorized computation. Therefore // \b Blaze exploits the fact that \c A is symmetric, selects the best suited storage order and // evaluates the multiplication as \code y = trans( A ) * x; \endcode // which significantly increases the performance. // // \n \subsection adaptors_symmetric_matrices_views Positive Impact: Row/Column Views on Column/Row-Major Matrices // // Another example is the optimization of a row view on a column-major symmetric matrix: \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; using blaze::Row; using blaze::rowMajor; using blaze::columnMajor; typedef SymmetricMatrix< DynamicMatrix<double,columnMajor> > DynamicSymmetric; DynamicSymmetric A( 10UL ); Row<DynamicSymmetric> row5 = row( A, 5UL ); \endcode // Usually, a row view on a column-major matrix results in a considerable performance decrease in // comparison to a row view on a row-major matrix due to the non-contiguous storage of the matrix // elements. However, in case of symmetric matrices, \b Blaze instead uses the according column of // the matrix, which provides the same performance as if the matrix were row-major. Note that // this also works for column views on row-major matrices, where \b Blaze can use the according // row instead of a column in order to provide maximum performance.
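//
// The column case mentioned above works analogously; a brief sketch mirroring the row example
// (the typedef name is only illustrative):
\code
typedef blaze::SymmetricMatrix< blaze::DynamicMatrix<double,blaze::rowMajor> > RowMajorSymmetric;

RowMajorSymmetric B( 10UL );

blaze::Column<RowMajorSymmetric> col5 = column( B, 5UL );  // Internally handled like row 5 of B
\endcode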
// // \n \subsection adaptors_symmetric_matrices_assignment Negative Impact: Assignment of a General Matrix // // In contrast to using a symmetric matrix on the right-hand side of an assignment (i.e. for read // access), which introduces absolutely no performance penalty, using a symmetric matrix on the // left-hand side of an assignment (i.e. for write access) may introduce additional overhead when // it is assigned a general matrix, which is not symmetric at compile time: \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; SymmetricMatrix< DynamicMatrix<double> > A, C; DynamicMatrix<double> B; B = A; // Only read-access to the symmetric matrix; no performance penalty C = A; // Assignment of a symmetric matrix to another symmetric matrix; no runtime overhead C = B; // Assignment of a general matrix to a symmetric matrix; some runtime overhead \endcode // When assigning a general, potentially not symmetric matrix to a symmetric matrix it is necessary // to check whether the matrix is symmetric at runtime in order to guarantee the symmetry property // of the symmetric matrix. In case it turns out to be symmetric, it is assigned as efficiently as // possible; if it is not, an exception is thrown. In order to prevent this runtime overhead it is // therefore generally advisable to assign symmetric matrices to other symmetric matrices.\n // In this context it is especially noteworthy that in contrast to additions and subtractions the // multiplication of two symmetric matrices does not necessarily result in another symmetric matrix: \code SymmetricMatrix< DynamicMatrix<double> > A, B, C; C = A + B; // Results in a symmetric matrix; no runtime overhead C = A - B; // Results in a symmetric matrix; no runtime overhead C = A * B; // Is not guaranteed to result in a symmetric matrix; some runtime overhead \endcode // \n Previous: \ref adaptors &nbsp; &nbsp; Next: \ref adaptors_hermitian_matrices */ //************************************************************************************************* //**Hermitian Matrices***************************************************************************** /*!\page adaptors_hermitian_matrices Hermitian Matrices // // \tableofcontents // // // \n \section adaptors_hermitian_matrices_general Hermitian Matrices // <hr> // // In addition to symmetric matrices, \b Blaze also provides an adaptor for Hermitian matrices. // Hermitian matrices provide the compile time guarantee to be square matrices with pair-wise // conjugate complex values. Mathematically, this means that an Hermitian matrix is always equal // to its conjugate transpose (\f$ A = \overline{A^T} \f$) and that all non-diagonal values have // a complex conjugate counterpart (\f$ a_{ij} == \overline{a_{ji}} \f$). Within the \b Blaze // library, Hermitian matrices are realized by the \ref adaptors_hermitian_matrices_hermitianmatrix // class template. // // // \n \section adaptors_hermitian_matrices_hermitianmatrix HermitianMatrix // <hr> // // The HermitianMatrix class template is an adapter for existing dense and sparse matrix types. // It inherits the properties and the interface of the given matrix type \c MT and extends it by // enforcing the additional invariant of Hermitian symmetry (i.e. the matrix is always equal to // its conjugate transpose \f$ A = \overline{A^T} \f$).
It can be included via the header file \code #include <blaze/math/HermitianMatrix.h> \endcode // The type of the adapted matrix can be specified via template parameter: \code template< typename MT > class HermitianMatrix; \endcode // \c MT specifies the type of the matrix to be adapted. HermitianMatrix can be used with any // non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. Also, // the given matrix type must have numeric element types (i.e. all integral types except \c bool, // floating point and complex types). Note that the given matrix type must be either resizable (as // for instance blaze::HybridMatrix or blaze::DynamicMatrix) or must be square at compile time (as // for instance blaze::StaticMatrix). // // The following examples give an impression of several possible Hermitian matrices: \code using blaze::unaligned; using blaze::unpadded; using blaze::rowMajor; using blaze::columnMajor; // Definition of a 3x3 row-major dense Hermitian matrix with static memory blaze::HermitianMatrix< blaze::StaticMatrix<int,3UL,3UL,rowMajor> > A; // Definition of a resizable column-major dense Hermitian matrix based on HybridMatrix blaze::HermitianMatrix< blaze::HybridMatrix<float,4UL,4UL,columnMajor> > B; // Definition of a resizable row-major dense Hermitian matrix based on DynamicMatrix blaze::HermitianMatrix< blaze::DynamicMatrix<std::complex<double>,rowMajor> > C; // Definition of a fixed size row-major dense Hermitian matrix based on CustomMatrix blaze::HermitianMatrix< blaze::CustomMatrix<double,unaligned,unpadded,rowMajor> > D; // Definition of a compressed row-major single precision complex Hermitian matrix blaze::HermitianMatrix< blaze::CompressedMatrix<std::complex<float>,rowMajor> > E; \endcode // The storage order of a Hermitian matrix depends on the storage order of the adapted matrix // type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. is specified as // blaze::rowMajor), the Hermitian matrix will also be a row-major matrix. Otherwise, if the // adapted matrix is column-major (i.e. is specified as blaze::columnMajor), the Hermitian matrix // will also be a column-major matrix. // // // \n \section adaptors_hermitian_matrices_vs_symmetric_matrices Hermitian Matrices vs. Symmetric Matrices // // The blaze::HermitianMatrix adaptor and the blaze::SymmetricMatrix adaptor share several traits. // However, there are a couple of differences, both from a mathematical and from an implementation // point of view. // // From a mathematical point of view, a matrix is called symmetric when it is equal to its // transpose (\f$ A = A^T \f$) and it is called Hermitian when it is equal to its conjugate // transpose (\f$ A = \overline{A^T} \f$). For matrices of real values, however, these two // conditions coincide, which means that symmetric matrices of real values are also Hermitian // and Hermitian matrices of real values are also symmetric. // // From an implementation point of view, \b Blaze restricts Hermitian matrices to numeric data // types (i.e. all integral types except \c bool, floating point and complex types), whereas // symmetric matrices can also be block matrices (i.e. can have vector or matrix elements). // For built-in element types, the HermitianMatrix adaptor behaves exactly like the according // SymmetricMatrix implementation. For complex element types, however, the Hermitian property // is enforced (see also \ref adaptors_hermitian_matrices_hermitian).
\code using blaze::DynamicMatrix; using blaze::DynamicVector; using blaze::HermitianMatrix; using blaze::SymmetricMatrix; // The following two matrices provide an identical experience (including performance) HermitianMatrix< DynamicMatrix<double> > A; // Both Hermitian and symmetric SymmetricMatrix< DynamicMatrix<double> > B; // Both Hermitian and symmetric // The following two matrices will behave differently HermitianMatrix< DynamicMatrix< complex<double> > > C; // Only Hermitian SymmetricMatrix< DynamicMatrix< complex<double> > > D; // Only symmetric // Hermitian block matrices are not allowed HermitianMatrix< DynamicMatrix< DynamicVector<double> > > E; // Compilation error! SymmetricMatrix< DynamicMatrix< DynamicVector<double> > > F; // Symmetric block matrix \endcode // \n \section adaptors_hermitian_matrices_special_properties Special Properties of Hermitian Matrices // <hr> // // A Hermitian matrix is used exactly like a matrix of the underlying, adapted matrix type \c MT. // It also provides (nearly) the same interface as the underlying matrix type. However, there are // some important exceptions resulting from the Hermitian symmetry constraint: // // -# <b>\ref adaptors_hermitian_matrices_square</b> // -# <b>\ref adaptors_hermitian_matrices_hermitian</b> // -# <b>\ref adaptors_hermitian_matrices_initialization</b> // // \n \subsection adaptors_hermitian_matrices_square Hermitian Matrices Must Always be Square! // // In case a resizable matrix is used (as for instance blaze::HybridMatrix, blaze::DynamicMatrix, // or blaze::CompressedMatrix), this means that the according constructors, the \c resize() and // the \c extend() functions only expect a single parameter, which specifies both the number of // rows and columns, instead of two (one for the number of rows and one for the number of columns): \code using blaze::DynamicMatrix; using blaze::HermitianMatrix; using blaze::rowMajor; // Default constructed, default initialized, row-major 3x3 Hermitian dynamic matrix HermitianMatrix< DynamicMatrix<std::complex<double>,rowMajor> > A( 3 ); // Resizing the matrix to 5x5 A.resize( 5 ); // Extending the number of rows and columns by 2, resulting in a 7x7 matrix A.extend( 2 ); \endcode // In case a matrix with a fixed size is used (as for instance blaze::StaticMatrix), the number // of rows and number of columns must be specified equally: \code using blaze::StaticMatrix; using blaze::HermitianMatrix; using blaze::columnMajor; // Correct setup of a fixed size column-major 3x3 Hermitian static matrix HermitianMatrix< StaticMatrix<std::complex<float>,3UL,3UL,columnMajor> > A; // Compilation error: the provided matrix type is not a square matrix type HermitianMatrix< StaticMatrix<std::complex<float>,3UL,4UL,columnMajor> > B; \endcode // \n \subsection adaptors_hermitian_matrices_hermitian The Hermitian Property is Always Enforced! // // This means that the following properties of a Hermitian matrix are always guaranteed: // // - The diagonal elements are real numbers, i.e. the imaginary part is zero // - Element \f$ a_{ij} \f$ is always the complex conjugate of element \f$ a_{ji} \f$ // // Thus modifying the element \f$ a_{ij} \f$ of a Hermitian matrix also modifies its // counterpart element \f$ a_{ji} \f$. 
Also, it is only possible to assign matrices that // are Hermitian themselves: \code using blaze::CompressedMatrix; using blaze::DynamicMatrix; using blaze::StaticMatrix; using blaze::HermitianMatrix; using blaze::rowMajor; typedef std::complex<double> cplx; // Default constructed, row-major 3x3 Hermitian compressed matrix HermitianMatrix< CompressedMatrix<cplx,rowMajor> > A( 3 ); // Initializing the matrix via the function call operator // // ( (1, 0) (0,0) (2,1) ) // ( (0, 0) (0,0) (0,0) ) // ( (2,-1) (0,0) (0,0) ) // A(0,0) = cplx( 1.0, 0.0 ); // Initialization of the diagonal element (0,0) A(0,2) = cplx( 2.0, 1.0 ); // Initialization of the elements (0,2) and (2,0) // Inserting three more elements via the insert() function // // ( (1, 0) (0,0) (2, 1) ) // ( (0, 0) (2,0) (4,-2) ) // ( (2,-1) (4,2) (0, 0) ) // A.insert( 1, 1, cplx( 2.0, 0.0 ) ); // Inserting the diagonal element (1,1) A.insert( 1, 2, cplx( 4.0, -2.0 ) ); // Inserting the elements (1,2) and (2,1) // Access via a non-const iterator // // ( (1, 0) (0,0) (8, 1) ) // ( (0, 0) (2,0) (4,-2) ) // ( (8,-1) (4,2) (0, 0) ) // *A.begin(2UL) = cplx( 8.0, -1.0 ); // Modifies both elements (2,0) and (0,2) // Erasing elements via the erase() function // // ( (0, 0) (0,0) (0, 0) ) // ( (0, 0) (2,0) (4,-2) ) // ( (0, 0) (4,2) (0, 0) ) // A.erase( 0, 0 ); // Erasing the diagonal element (0,0) A.erase( 0, 2 ); // Erasing the elements (0,2) and (2,0) // Construction from a Hermitian dense matrix StaticMatrix<cplx,3UL,3UL> B{ { cplx( 3.0, 0.0 ), cplx( 8.0, 2.0 ), cplx( -2.0, 2.0 ) }, { cplx( 8.0, -2.0 ), cplx( 0.0, 0.0 ), cplx( -1.0, -1.0 ) }, { cplx( -2.0, -2.0 ), cplx( -1.0, 1.0 ), cplx( 4.0, 0.0 ) } }; HermitianMatrix< DynamicMatrix<cplx,rowMajor> > C( B ); // OK // Assignment of a non-Hermitian dense matrix StaticMatrix<cplx,3UL,3UL> D{ { cplx( 3.0, 0.0 ), cplx( 7.0, 2.0 ), cplx( 3.0, 2.0 ) }, { cplx( 8.0, 1.0 ), cplx( 0.0, 0.0 ), cplx( 6.0, 4.0 ) }, { cplx( -2.0, 2.0 ), cplx( -1.0, 1.0 ), cplx( 4.0, 0.0 ) } }; C = D; // Throws an exception; Hermitian invariant would be violated! \endcode // The same restriction also applies to the \c append() function for sparse matrices: Appending // the element \f$ a_{ij} \f$ additionally inserts the element \f$ a_{ji} \f$ into the matrix. // Despite the additional insertion, the \c append() function still provides the most efficient // way to set up a Hermitian sparse matrix.
In order to achieve the maximum efficiency, the // capacity of the individual rows/columns of the matrix should be specifically prepared with // \c reserve() calls: \code using blaze::CompressedMatrix; using blaze::HermitianMatrix; using blaze::rowMajor; typedef std::complex<double> cplx; // Setup of the Hermitian matrix // // ( (0, 0) (1,2) (3,-4) ) // A = ( (1,-2) (2,0) (0, 0) ) // ( (3, 4) (0,0) (0, 0) ) // HermitianMatrix< CompressedMatrix<cplx,rowMajor> > A( 3 ); A.reserve( 5 ); // Reserving enough space for 5 non-zero elements A.reserve( 0, 2 ); // Reserving two non-zero elements in the first row A.reserve( 1, 2 ); // Reserving two non-zero elements in the second row A.reserve( 2, 1 ); // Reserving a single non-zero element in the third row A.append( 0, 1, cplx( 1.0, 2.0 ) ); // Appending an element at position (0,1) and (1,0) A.append( 1, 1, cplx( 2.0, 0.0 ) ); // Appending an element at position (1,1) A.append( 2, 0, cplx( 3.0, 4.0 ) ); // Appending an element at position (2,0) and (0,2) \endcode // The Hermitian property is also enforced for Hermitian custom matrices: In case the given array // of elements does not represent a Hermitian matrix, a \c std::invalid_argument exception is // thrown: \code using blaze::CustomMatrix; using blaze::HermitianMatrix; using blaze::unaligned; using blaze::unpadded; using blaze::rowMajor; typedef HermitianMatrix< CustomMatrix<double,unaligned,unpadded,rowMajor> > CustomHermitian; // Creating a 3x3 Hermitian custom matrix from a properly initialized array double array[9] = { 1.0, 2.0, 4.0, 2.0, 3.0, 5.0, 4.0, 5.0, 6.0 }; CustomHermitian A( array, 3UL ); // OK // Attempt to create a second 3x3 Hermitian custom matrix from an uninitialized array CustomHermitian B( new double[9UL], 3UL, blaze::ArrayDelete() ); // Throws an exception \endcode // Finally, the Hermitian property is enforced for views (rows, columns, submatrices, ...) on the // Hermitian matrix. The following example demonstrates that modifying the elements of an entire // row of the Hermitian matrix also affects the counterpart elements in the according column of // the matrix: \code using blaze::DynamicMatrix; using blaze::HermitianMatrix; typedef std::complex<double> cplx; // Setup of the Hermitian matrix // // ( (0, 0) (1,-1) (0,0) (2, 1) ) // A = ( (1, 1) (3, 0) (4,2) (0, 0) ) // ( (0, 0) (4,-2) (0,0) (5,-3) ) // ( (2,-1) (0, 0) (5,3) (0, 0) ) // HermitianMatrix< DynamicMatrix<cplx> > A( 4 ); A(0,1) = cplx( 1.0, -1.0 ); A(0,3) = cplx( 2.0, 1.0 ); A(1,1) = cplx( 3.0, 0.0 ); A(1,2) = cplx( 4.0, 2.0 ); A(2,3) = cplx( 5.0, -3.0 ); // Setting all elements in the 1st row to 0 results in the matrix // // ( (0, 0) (0,0) (0,0) (2, 1) ) // A = ( (0, 0) (0,0) (0,0) (0, 0) ) // ( (0, 0) (0,0) (0,0) (5,-3) ) // ( (2,-1) (0,0) (5,3) (0, 0) ) // row( A, 1 ) = cplx( 0.0, 0.0 ); \endcode // The next example demonstrates the (compound) assignment to submatrices of Hermitian matrices. // Since the modification of element \f$ a_{ij} \f$ of a Hermitian matrix also modifies the // element \f$ a_{ji} \f$, the matrix to be assigned must be structured such that the Hermitian // symmetry of the matrix is preserved.
Otherwise a \c std::invalid_argument exception is thrown: \code using blaze::DynamicMatrix; using blaze::HermitianMatrix; typedef std::complex<double> cplx; // Setup of two default 4x4 Hermitian matrices HermitianMatrix< DynamicMatrix<cplx> > A1( 4 ), A2( 4 ); // Setup of the 3x2 dynamic matrix // // ( (1,-1) (2, 5) ) // B = ( (3, 0) (4,-6) ) // ( (5, 0) (6, 0) ) // DynamicMatrix<cplx> B( 3UL, 2UL ); B(0,0) = cplx( 1.0, -1.0 ); B(0,1) = cplx( 2.0, 5.0 ); B(1,0) = cplx( 3.0, 0.0 ); B(1,1) = cplx( 4.0, -6.0 ); B(2,0) = cplx( 5.0, 0.0 ); B(2,1) = cplx( 6.0, 0.0 ); // OK: Assigning B to a submatrix of A1 such that the Hermitian property is preserved // // ( (0, 0) (0, 0) (1,-1) (2, 5) ) // A1 = ( (0, 0) (0, 0) (3, 0) (4,-6) ) // ( (1, 1) (3, 0) (5, 0) (6, 0) ) // ( (2,-5) (4, 6) (6, 0) (0, 0) ) // submatrix( A1, 0UL, 2UL, 3UL, 2UL ) = B; // OK // Error: Assigning B to a submatrix of A2 such that the Hermitian property isn't preserved! // The elements marked with X cannot be assigned unambiguously! // // ( (0, 0) (1,-1) (2,5) (0,0) ) // A2 = ( (1, 1) (3, 0) (X,X) (0,0) ) // ( (2,-5) (X, X) (6,0) (0,0) ) // ( (0, 0) (0, 0) (0,0) (0,0) ) // submatrix( A2, 0UL, 1UL, 3UL, 2UL ) = B; // Assignment throws an exception! \endcode // \n \subsection adaptors_hermitian_matrices_initialization The Elements of a Dense Hermitian Matrix are Always Default Initialized! // // Although this results in a small loss of efficiency (especially in case all default values are // overridden afterwards), this property is important since otherwise the Hermitian property of // dense Hermitian matrices could not be guaranteed: \code using blaze::DynamicMatrix; using blaze::HermitianMatrix; // Uninitialized, 5x5 row-major dynamic matrix DynamicMatrix<int,rowMajor> A( 5, 5 ); // Default initialized, 5x5 row-major Hermitian dynamic matrix HermitianMatrix< DynamicMatrix<int,rowMajor> > B( 5 ); \endcode // \n \section adaptors_hermitian_matrices_arithmetic_operations Arithmetic Operations // <hr> // // A HermitianMatrix can be used within all numerical operations in any way any other dense or // sparse matrix can be used. It can also be combined with any other dense or sparse vector or // matrix.
The following code example gives an impression of the use of HermitianMatrix within // arithmetic operations: \code using blaze::HermitianMatrix; using blaze::DynamicMatrix; using blaze::HybridMatrix; using blaze::StaticMatrix; using blaze::CompressedMatrix; using blaze::rowMajor; using blaze::columnMajor; typedef std::complex<float> cplx; DynamicMatrix<cplx,rowMajor> A( 3, 3 ); CompressedMatrix<cplx,rowMajor> B( 3, 3 ); HermitianMatrix< DynamicMatrix<cplx,rowMajor> > C( 3 ); HermitianMatrix< CompressedMatrix<cplx,rowMajor> > D( 3 ); HermitianMatrix< HybridMatrix<cplx,3UL,3UL,rowMajor> > E; HermitianMatrix< StaticMatrix<cplx,3UL,3UL,columnMajor> > F; E = A + B; // Matrix addition and assignment to a row-major Hermitian matrix F = C - D; // Matrix subtraction and assignment to a column-major Hermitian matrix F = A * D; // Matrix multiplication between a dense and a sparse matrix C *= 2.0; // In-place scaling of matrix C E = 2.0 * B; // Scaling of matrix B F = C * 2.0; // Scaling of matrix C E += A - B; // Addition assignment F -= C + D; // Subtraction assignment F *= A * D; // Multiplication assignment \endcode // \n \section adaptors_hermitian_matrices_performance Performance Considerations // <hr> // // When the Hermitian property of a matrix is known beforehand using the HermitianMatrix adaptor // instead of a general matrix can be a considerable performance advantage. This is particularly // true in case the Hermitian matrix is also symmetric (i.e. has built-in element types). The // \b Blaze library tries to exploit the properties of Hermitian (symmetric) matrices whenever // possible. However, there are also situations when using a Hermitian matrix introduces some // overhead. The following examples demonstrate several situations where Hermitian matrices can // positively or negatively impact performance. // // \n \subsection adaptors_hermitian_matrices_matrix_matrix_multiplication Positive Impact: Matrix/Matrix Multiplication // // When multiplying two matrices, at least one of which is symmetric, \b Blaze can exploit the fact // that \f$ A = A^T \f$ and choose the fastest and most suited combination of storage orders for the // multiplication. The following example demonstrates this by means of a dense matrix/sparse matrix // multiplication: \code using blaze::DynamicMatrix; using blaze::HermitianMatrix; using blaze::rowMajor; using blaze::columnMajor; HermitianMatrix< DynamicMatrix<double,rowMajor> > A; // Both Hermitian and symmetric HermitianMatrix< CompressedMatrix<double,columnMajor> > B; // Both Hermitian and symmetric DynamicMatrix<double,columnMajor> C; // ... Resizing and initialization C = A * B; \endcode // Intuitively, the chosen combination of a row-major and a column-major matrix is the most suited // for maximum performance. However, \b Blaze evaluates the multiplication as \code C = A * trans( B ); \endcode // which significantly increases the performance since in contrast to the original formulation the // optimized form can be vectorized. Therefore, in the context of matrix multiplications, using a // symmetric matrix is obviously an advantage.
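//
// The legality of this rewriting follows directly from the invariant stated above: for the
// real-valued (and therefore symmetric) Hermitian matrices used here \f$ B = B^T \f$ holds, so
// exchanging \c B for \c trans( B ) only changes the storage order, never the result. A quick
// sanity check as a sketch:
\code
blaze::HermitianMatrix< blaze::DynamicMatrix<double> > B( 100UL );
// ... Initialization

assert( B == trans( B ) );  // Holds by the Hermitian (here also symmetric) invariant
\endcode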
//
// \n \subsection adaptors_hermitian_matrices_matrix_vector_multiplication Positive Impact: Matrix/Vector Multiplication
//
// A similar optimization is possible in case of matrix/vector multiplications:

   \code
   using blaze::DynamicMatrix;
   using blaze::DynamicVector;
   using blaze::CompressedVector;
   using blaze::HermitianMatrix;
   using blaze::rowMajor;
   using blaze::columnVector;

   HermitianMatrix< DynamicMatrix<double,rowMajor> > A;  // Hermitian and symmetric
   CompressedVector<double,columnVector> x;
   DynamicVector<double,columnVector> y;

   // ... Resizing and initialization

   y = A * x;
   \endcode

// In this example it is not intuitively apparent that using a row-major matrix is not the best
// possible choice in terms of performance since the computation cannot be vectorized. Choosing
// a column-major matrix instead, however, would enable a vectorized computation. Therefore
// \b Blaze exploits the fact that \c A is symmetric, selects the best suited storage order and
// evaluates the multiplication as

   \code
   y = trans( A ) * x;
   \endcode

// which also significantly increases the performance.
//
// \n \subsection adaptors_hermitian_matrices_views Positive Impact: Row/Column Views on Column/Row-Major Matrices
//
// Another example is the optimization of a row view on a column-major symmetric matrix:

   \code
   using blaze::DynamicMatrix;
   using blaze::HermitianMatrix;
   using blaze::Row;
   using blaze::rowMajor;
   using blaze::columnMajor;

   typedef HermitianMatrix< DynamicMatrix<double,columnMajor> >  DynamicHermitian;

   DynamicHermitian A( 10UL );  // Both Hermitian and symmetric
   Row<DynamicHermitian> row5 = row( A, 5UL );
   \endcode

// Usually, a row view on a column-major matrix results in a considerable performance decrease in
// comparison to a row view on a row-major matrix due to the non-contiguous storage of the matrix
// elements. However, in case of symmetric matrices, \b Blaze instead uses the according column of
// the matrix, which provides the same performance as if the matrix would be row-major. Note that
// this also works for column views on row-major matrices, where \b Blaze can use the according
// row instead of a column in order to provide maximum performance.
//
// \n \subsection adaptors_hermitian_matrices_assignment Negative Impact: Assignment of a General Matrix
//
// In contrast to using a Hermitian matrix on the right-hand side of an assignment (i.e. for read
// access), which introduces absolutely no performance penalty, using a Hermitian matrix on the
// left-hand side of an assignment (i.e. for write access) may introduce additional overhead when
// it is assigned a general matrix, which is not Hermitian at compile time:

   \code
   using blaze::DynamicMatrix;
   using blaze::HermitianMatrix;

   typedef std::complex<double>  cplx;

   HermitianMatrix< DynamicMatrix<cplx> > A, C;
   DynamicMatrix<cplx> B;

   B = A;  // Only read-access to the Hermitian matrix; no performance penalty
   C = A;  // Assignment of a Hermitian matrix to another Hermitian matrix; no runtime overhead
   C = B;  // Assignment of a general matrix to a Hermitian matrix; some runtime overhead
   \endcode

// When assigning a general, potentially not Hermitian matrix to a Hermitian matrix it is necessary
// to check whether the matrix is Hermitian at runtime in order to guarantee the Hermitian property
// of the Hermitian matrix. In case it turns out to be Hermitian, it is assigned as efficiently as
// possible; if it is not, an exception is thrown.
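// If the exception is undesirable, the property can be queried up front via \c isHermitian().
// The following sketch is for illustration only; note that the subsequent assignment still
// performs its own internal check, so the guard avoids the exception, not the extra work:

   \code
   using blaze::DynamicMatrix;
   using blaze::HermitianMatrix;

   typedef std::complex<double>  cplx;

   HermitianMatrix< DynamicMatrix<cplx> > A;
   DynamicMatrix<cplx> B;

   // ... Resizing and initialization

   if( isHermitian( B ) ) {
      A = B;  // Guaranteed not to throw
   }
   else {
      // ... Handle the non-Hermitian case explicitly
   }
   \endcode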
// In order to prevent this runtime overhead it is therefore generally advisable to assign
// Hermitian matrices to other Hermitian matrices.\n
// In this context it is especially noteworthy that in contrast to additions and subtractions the
// multiplication of two Hermitian matrices does not necessarily result in another Hermitian matrix:

   \code
   HermitianMatrix< DynamicMatrix<double> > A, B, C;

   C = A + B;  // Results in a Hermitian matrix; no runtime overhead
   C = A - B;  // Results in a Hermitian matrix; no runtime overhead
   C = A * B;  // Is not guaranteed to result in a Hermitian matrix; some runtime overhead
   \endcode

// \n Previous: \ref adaptors_symmetric_matrices &nbsp; &nbsp; Next: \ref adaptors_triangular_matrices
*/
//*************************************************************************************************


//**Triangular Matrices****************************************************************************
/*!\page adaptors_triangular_matrices Triangular Matrices
//
// \tableofcontents
//
//
// \n \section adaptors_triangular_matrices_general Triangular Matrices
// <hr>
//
// Triangular matrices come in three flavors: Lower triangular matrices provide the compile time
// guarantee to be square matrices and that the upper part of the matrix contains only default
// elements that cannot be modified. Upper triangular matrices on the other hand provide the
// compile time guarantee to be square and that the lower part of the matrix contains only fixed
// default elements. Finally, diagonal matrices provide the compile time guarantee to be square
// and that both the lower and upper part of the matrix contain only immutable default elements.
// These properties can be exploited to gain higher performance and/or to save memory. Within the
// \b Blaze library, several kinds of lower and upper triangular and diagonal matrices are realized
// by the following class templates:
//
// Lower triangular matrices:
//  - <b>\ref adaptors_triangular_matrices_lowermatrix</b>
//  - <b>\ref adaptors_triangular_matrices_unilowermatrix</b>
//  - <b>\ref adaptors_triangular_matrices_strictlylowermatrix</b>
//
// Upper triangular matrices:
//  - <b>\ref adaptors_triangular_matrices_uppermatrix</b>
//  - <b>\ref adaptors_triangular_matrices_uniuppermatrix</b>
//  - <b>\ref adaptors_triangular_matrices_strictlyuppermatrix</b>
//
// Diagonal matrices:
//  - <b>\ref adaptors_triangular_matrices_diagonalmatrix</b>
//
//
// \n \section adaptors_triangular_matrices_lowermatrix LowerMatrix
// <hr>
//
// The blaze::LowerMatrix class template is an adapter for existing dense and sparse matrix types.
// It inherits the properties and the interface of the given matrix type \c MT and extends it by
// enforcing the additional invariant that all matrix elements above the diagonal are 0 (lower
// triangular matrix):

                        \f[\left(\begin{array}{*{5}{c}}
                        l_{0,0} & 0       & 0       & \cdots & 0       \\
                        l_{1,0} & l_{1,1} & 0       & \cdots & 0       \\
                        l_{2,0} & l_{2,1} & l_{2,2} & \cdots & 0       \\
                        \vdots  & \vdots  & \vdots  & \ddots & \vdots  \\
                        l_{N,0} & l_{N,1} & l_{N,2} & \cdots & l_{N,N} \\
                        \end{array}\right).\f]

// It can be included via the header file

   \code
   #include <blaze/math/LowerMatrix.h>
   \endcode

// The type of the adapted matrix can be specified via the first template parameter:

   \code
   template< typename MT >
   class LowerMatrix;
   \endcode

// \c MT specifies the type of the matrix to be adapted. blaze::LowerMatrix can be used with any
// non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type.
// Note that the given matrix type must be either resizable (as for instance blaze::HybridMatrix or
// blaze::DynamicMatrix) or must be square at compile time (as for instance blaze::StaticMatrix).
//
// The following examples give an impression of several possible lower matrices:

   \code
   using blaze::unaligned;
   using blaze::unpadded;
   using blaze::rowMajor;
   using blaze::columnMajor;

   // Definition of a 3x3 row-major dense lower matrix with static memory
   blaze::LowerMatrix< blaze::StaticMatrix<int,3UL,3UL,rowMajor> > A;

   // Definition of a resizable column-major dense lower matrix based on HybridMatrix
   blaze::LowerMatrix< blaze::HybridMatrix<float,4UL,4UL,columnMajor> > B;

   // Definition of a resizable row-major dense lower matrix based on DynamicMatrix
   blaze::LowerMatrix< blaze::DynamicMatrix<double,rowMajor> > C;

   // Definition of a fixed size row-major dense lower matrix based on CustomMatrix
   blaze::LowerMatrix< blaze::CustomMatrix<double,unaligned,unpadded,rowMajor> > D;

   // Definition of a compressed row-major single precision lower matrix
   blaze::LowerMatrix< blaze::CompressedMatrix<float,rowMajor> > E;
   \endcode

// The storage order of a lower matrix depends on the storage order of the adapted matrix
// type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. is specified
// as blaze::rowMajor), the lower matrix will also be a row-major matrix. Otherwise, if the
// adapted matrix is column-major (i.e. is specified as blaze::columnMajor), the lower matrix
// will also be a column-major matrix.
//
//
// \n \section adaptors_triangular_matrices_unilowermatrix UniLowerMatrix
// <hr>
//
// The blaze::UniLowerMatrix class template is an adapter for existing dense and sparse matrix
// types. It inherits the properties and the interface of the given matrix type \c MT and extends
// it by enforcing the additional invariant that all diagonal matrix elements are 1 and all matrix
// elements above the diagonal are 0 (lower unitriangular matrix):

                        \f[\left(\begin{array}{*{5}{c}}
                        1       & 0       & 0       & \cdots & 0      \\
                        l_{1,0} & 1       & 0       & \cdots & 0      \\
                        l_{2,0} & l_{2,1} & 1       & \cdots & 0      \\
                        \vdots  & \vdots  & \vdots  & \ddots & \vdots \\
                        l_{N,0} & l_{N,1} & l_{N,2} & \cdots & 1      \\
                        \end{array}\right).\f]

// It can be included via the header file

   \code
   #include <blaze/math/UniLowerMatrix.h>
   \endcode

// The type of the adapted matrix can be specified via the first template parameter:

   \code
   template< typename MT >
   class UniLowerMatrix;
   \endcode

// \c MT specifies the type of the matrix to be adapted. blaze::UniLowerMatrix can be used with any
// non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. Also,
// the given matrix type must have numeric element types (i.e. all integral types except \c bool,
// floating point and complex types). Note that the given matrix type must be either resizable (as
// for instance blaze::HybridMatrix or blaze::DynamicMatrix) or must be square at compile time (as
// for instance blaze::StaticMatrix).
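// Note that the unit-diagonal invariant described above is strictly enforced at runtime. The
// following sketch illustrates the intended behavior (illustrative only; the exact diagnostics
// may differ from what is shown in the comments):

   \code
   using blaze::DynamicMatrix;
   using blaze::UniLowerMatrix;

   UniLowerMatrix< DynamicMatrix<int> > A( 3 );

   A(1,0) = 2;      // OK: Modification of a lower element
   int i = A(1,1);  // OK: Read access to a diagonal element; always yields 1
   A(1,1) = 9;      // Error: Invalid modification of a diagonal element; results in an exception
   \endcode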
//
// The following examples give an impression of several possible lower unitriangular matrices:

   \code
   // Definition of a 3x3 row-major dense unilower matrix with static memory
   blaze::UniLowerMatrix< blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A;

   // Definition of a resizable column-major dense unilower matrix based on HybridMatrix
   blaze::UniLowerMatrix< blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> > B;

   // Definition of a resizable row-major dense unilower matrix based on DynamicMatrix
   blaze::UniLowerMatrix< blaze::DynamicMatrix<double,blaze::rowMajor> > C;

   // Definition of a compressed row-major single precision unilower matrix
   blaze::UniLowerMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > D;
   \endcode

// The storage order of a lower unitriangular matrix depends on the storage order of the
// adapted matrix type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e.
// is specified as blaze::rowMajor), the unilower matrix will also be a row-major matrix.
// Otherwise, if the adapted matrix is column-major (i.e. is specified as blaze::columnMajor),
// the unilower matrix will also be a column-major matrix.
//
//
// \n \section adaptors_triangular_matrices_strictlylowermatrix StrictlyLowerMatrix
// <hr>
//
// The blaze::StrictlyLowerMatrix class template is an adapter for existing dense and sparse matrix
// types. It inherits the properties and the interface of the given matrix type \c MT and extends
// it by enforcing the additional invariant that all diagonal matrix elements and all matrix
// elements above the diagonal are 0 (strictly lower triangular matrix):

                        \f[\left(\begin{array}{*{5}{c}}
                        0       & 0       & 0       & \cdots & 0      \\
                        l_{1,0} & 0       & 0       & \cdots & 0      \\
                        l_{2,0} & l_{2,1} & 0       & \cdots & 0      \\
                        \vdots  & \vdots  & \vdots  & \ddots & \vdots \\
                        l_{N,0} & l_{N,1} & l_{N,2} & \cdots & 0      \\
                        \end{array}\right).\f]

// It can be included via the header file

   \code
   #include <blaze/math/StrictlyLowerMatrix.h>
   \endcode

// The type of the adapted matrix can be specified via the first template parameter:

   \code
   template< typename MT >
   class StrictlyLowerMatrix;
   \endcode

// \c MT specifies the type of the matrix to be adapted. blaze::StrictlyLowerMatrix can be used
// with any non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix
// type. Note that the given matrix type must be either resizable (as for instance
// blaze::HybridMatrix or blaze::DynamicMatrix) or must be square at compile time (as for instance
// blaze::StaticMatrix).
//
// The following examples give an impression of several possible strictly lower triangular matrices:

   \code
   // Definition of a 3x3 row-major dense strictly lower matrix with static memory
   blaze::StrictlyLowerMatrix< blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A;

   // Definition of a resizable column-major dense strictly lower matrix based on HybridMatrix
   blaze::StrictlyLowerMatrix< blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> > B;

   // Definition of a resizable row-major dense strictly lower matrix based on DynamicMatrix
   blaze::StrictlyLowerMatrix< blaze::DynamicMatrix<double,blaze::rowMajor> > C;

   // Definition of a compressed row-major single precision strictly lower matrix
   blaze::StrictlyLowerMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > D;
   \endcode

// The storage order of a strictly lower triangular matrix depends on the storage order of
// the adapted matrix type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e.
// is specified as blaze::rowMajor), the strictly lower matrix will also be a row-major matrix.
// Otherwise, if the adapted matrix is column-major (i.e. is specified as blaze::columnMajor),
// the strictly lower matrix will also be a column-major matrix.
//
//
// \n \section adaptors_triangular_matrices_uppermatrix UpperMatrix
// <hr>
//
// The blaze::UpperMatrix class template is an adapter for existing dense and sparse matrix types.
// It inherits the properties and the interface of the given matrix type \c MT and extends it by
// enforcing the additional invariant that all matrix elements below the diagonal are 0 (upper
// triangular matrix):

                        \f[\left(\begin{array}{*{5}{c}}
                        u_{0,0} & u_{0,1} & u_{0,2} & \cdots & u_{0,N} \\
                        0       & u_{1,1} & u_{1,2} & \cdots & u_{1,N} \\
                        0       & 0       & u_{2,2} & \cdots & u_{2,N} \\
                        \vdots  & \vdots  & \vdots  & \ddots & \vdots  \\
                        0       & 0       & 0       & \cdots & u_{N,N} \\
                        \end{array}\right).\f]

// It can be included via the header file

   \code
   #include <blaze/math/UpperMatrix.h>
   \endcode

// The type of the adapted matrix can be specified via the first template parameter:

   \code
   template< typename MT >
   class UpperMatrix;
   \endcode

// \c MT specifies the type of the matrix to be adapted. blaze::UpperMatrix can be used with any
// non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. Note
// that the given matrix type must be either resizable (as for instance blaze::HybridMatrix or
// blaze::DynamicMatrix) or must be square at compile time (as for instance blaze::StaticMatrix).
//
// The following examples give an impression of several possible upper matrices:

   \code
   // Definition of a 3x3 row-major dense upper matrix with static memory
   blaze::UpperMatrix< blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A;

   // Definition of a resizable column-major dense upper matrix based on HybridMatrix
   blaze::UpperMatrix< blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> > B;

   // Definition of a resizable row-major dense upper matrix based on DynamicMatrix
   blaze::UpperMatrix< blaze::DynamicMatrix<double,blaze::rowMajor> > C;

   // Definition of a compressed row-major single precision upper matrix
   blaze::UpperMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > D;
   \endcode

// The storage order of an upper matrix depends on the storage order of the adapted matrix
// type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. is specified
// as blaze::rowMajor), the upper matrix will also be a row-major matrix. Otherwise, if the
// adapted matrix is column-major (i.e. is specified as blaze::columnMajor), the upper matrix
// will also be a column-major matrix.
//
//
// \n \section adaptors_triangular_matrices_uniuppermatrix UniUpperMatrix
// <hr>
//
// The blaze::UniUpperMatrix class template is an adapter for existing dense and sparse matrix
// types. It inherits the properties and the interface of the given matrix type \c MT and extends
// it by enforcing the additional invariant that all diagonal matrix elements are 1 and all matrix
// elements below the diagonal are 0 (upper unitriangular matrix):

                        \f[\left(\begin{array}{*{5}{c}}
                        1      & u_{0,1} & u_{0,2} & \cdots & u_{0,N} \\
                        0      & 1       & u_{1,2} & \cdots & u_{1,N} \\
                        0      & 0       & 1       & \cdots & u_{2,N} \\
                        \vdots & \vdots  & \vdots  & \ddots & \vdots  \\
                        0      & 0       & 0       & \cdots & 1       \\
                        \end{array}\right).\f]

// It can be included via the header file

   \code
   #include <blaze/math/UniUpperMatrix.h>
   \endcode

// The type of the adapted matrix can be specified via the first template parameter:

   \code
   template< typename MT >
   class UniUpperMatrix;
   \endcode

// \c MT specifies the type of the matrix to be adapted. blaze::UniUpperMatrix can be used with any
// non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. Also,
// the given matrix type must have numeric element types (i.e. all integral types except \c bool,
// floating point and complex types). Note that the given matrix type must be either resizable (as
// for instance blaze::HybridMatrix or blaze::DynamicMatrix) or must be square at compile time (as
// for instance blaze::StaticMatrix).
//
// The following examples give an impression of several possible upper unitriangular matrices:

   \code
   // Definition of a 3x3 row-major dense uniupper matrix with static memory
   blaze::UniUpperMatrix< blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A;

   // Definition of a resizable column-major dense uniupper matrix based on HybridMatrix
   blaze::UniUpperMatrix< blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> > B;

   // Definition of a resizable row-major dense uniupper matrix based on DynamicMatrix
   blaze::UniUpperMatrix< blaze::DynamicMatrix<double,blaze::rowMajor> > C;

   // Definition of a compressed row-major single precision uniupper matrix
   blaze::UniUpperMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > D;
   \endcode

// The storage order of an upper unitriangular matrix depends on the storage order of the
// adapted matrix type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e.
// is specified as blaze::rowMajor), the uniupper matrix will also be a row-major matrix.
// Otherwise, if the adapted matrix is column-major (i.e. is specified as blaze::columnMajor),
// the uniupper matrix will also be a column-major matrix.
//
//
// \n \section adaptors_triangular_matrices_strictlyuppermatrix StrictlyUpperMatrix
// <hr>
//
// The blaze::StrictlyUpperMatrix class template is an adapter for existing dense and sparse matrix
// types. It inherits the properties and the interface of the given matrix type \c MT and extends
// it by enforcing the additional invariant that all diagonal matrix elements and all matrix
// elements below the diagonal are 0 (strictly upper triangular matrix):

                        \f[\left(\begin{array}{*{5}{c}}
                        0      & u_{0,1} & u_{0,2} & \cdots & u_{0,N} \\
                        0      & 0       & u_{1,2} & \cdots & u_{1,N} \\
                        0      & 0       & 0       & \cdots & u_{2,N} \\
                        \vdots & \vdots  & \vdots  & \ddots & \vdots  \\
                        0      & 0       & 0       & \cdots & 0       \\
                        \end{array}\right).\f]

// It can be included via the header file

   \code
   #include <blaze/math/StrictlyUpperMatrix.h>
   \endcode

// The type of the adapted matrix can be specified via the first template parameter:

   \code
   template< typename MT >
   class StrictlyUpperMatrix;
   \endcode

// \c MT specifies the type of the matrix to be adapted. blaze::StrictlyUpperMatrix can be used
// with any non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix
// type. Note that the given matrix type must be either resizable (as for instance
// blaze::HybridMatrix or blaze::DynamicMatrix) or must be square at compile time (as for instance
// blaze::StaticMatrix).
//
// The following examples give an impression of several possible strictly upper triangular matrices:

   \code
   // Definition of a 3x3 row-major dense strictly upper matrix with static memory
   blaze::StrictlyUpperMatrix< blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A;

   // Definition of a resizable column-major dense strictly upper matrix based on HybridMatrix
   blaze::StrictlyUpperMatrix< blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> > B;

   // Definition of a resizable row-major dense strictly upper matrix based on DynamicMatrix
   blaze::StrictlyUpperMatrix< blaze::DynamicMatrix<double,blaze::rowMajor> > C;

   // Definition of a compressed row-major single precision strictly upper matrix
   blaze::StrictlyUpperMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > D;
   \endcode

// The storage order of a strictly upper triangular matrix depends on the storage order of
// the adapted matrix type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e.
// is specified as blaze::rowMajor), the strictly upper matrix will also be a row-major matrix.
// Otherwise, if the adapted matrix is column-major (i.e. is specified as blaze::columnMajor),
// the strictly upper matrix will also be a column-major matrix.
//
//
// \n \section adaptors_triangular_matrices_diagonalmatrix DiagonalMatrix
// <hr>
//
// The blaze::DiagonalMatrix class template is an adapter for existing dense and sparse matrix
// types. It inherits the properties and the interface of the given matrix type \c MT and extends
// it by enforcing the additional invariant that all matrix elements above and below the diagonal
// are 0 (diagonal matrix):

                        \f[\left(\begin{array}{*{5}{c}}
                        l_{0,0} & 0       & 0       & \cdots & 0       \\
                        0       & l_{1,1} & 0       & \cdots & 0       \\
                        0       & 0       & l_{2,2} & \cdots & 0       \\
                        \vdots  & \vdots  & \vdots  & \ddots & \vdots  \\
                        0       & 0       & 0       & \cdots & l_{N,N} \\
                        \end{array}\right).\f]

// It can be included via the header file

   \code
   #include <blaze/math/DiagonalMatrix.h>
   \endcode

// The type of the adapted matrix can be specified via the first template parameter:

   \code
   template< typename MT >
   class DiagonalMatrix;
   \endcode

// \c MT specifies the type of the matrix to be adapted. blaze::DiagonalMatrix can be used with any
// non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. Note
// that the given matrix type must be either resizable (as for instance blaze::HybridMatrix or
// blaze::DynamicMatrix) or must be square at compile time (as for instance blaze::StaticMatrix).
//
// The following examples give an impression of several possible diagonal matrices:

   \code
   // Definition of a 3x3 row-major dense diagonal matrix with static memory
   blaze::DiagonalMatrix< blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A;

   // Definition of a resizable column-major dense diagonal matrix based on HybridMatrix
   blaze::DiagonalMatrix< blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> > B;

   // Definition of a resizable row-major dense diagonal matrix based on DynamicMatrix
   blaze::DiagonalMatrix< blaze::DynamicMatrix<double,blaze::rowMajor> > C;

   // Definition of a compressed row-major single precision diagonal matrix
   blaze::DiagonalMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > D;
   \endcode

// The storage order of a diagonal matrix depends on the storage order of the adapted matrix
// type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. is specified
// as blaze::rowMajor), the diagonal matrix will also be a row-major matrix. Otherwise, if the
// adapted matrix is column-major (i.e. is specified as blaze::columnMajor), the diagonal matrix
// will also be a column-major matrix.
//
//
// \n \section adaptors_triangular_matrices_special_properties Special Properties of Triangular Matrices
// <hr>
//
// A triangular matrix is used exactly like a matrix of the underlying, adapted matrix type \c MT.
// It also provides (nearly) the same interface as the underlying matrix type. However, there are
// some important exceptions resulting from the triangular matrix constraint:
//
//  -# <b>\ref adaptors_triangular_matrices_square</b>
//  -# <b>\ref adaptors_triangular_matrices_triangular</b>
//  -# <b>\ref adaptors_triangular_matrices_initialization</b>
//  -# <b>\ref adaptors_triangular_matrices_storage</b>
//  -# <b>\ref adaptors_triangular_matrices_scaling</b>
//
// \n \subsection adaptors_triangular_matrices_square Triangular Matrices Must Always be Square!
//
// In case a resizable matrix is used (as for instance blaze::HybridMatrix, blaze::DynamicMatrix,
// or blaze::CompressedMatrix), this means that the according constructors, the \c resize() and
// the \c extend() functions only expect a single parameter, which specifies both the number of
// rows and columns, instead of two (one for the number of rows and one for the number of columns):

   \code
   using blaze::DynamicMatrix;
   using blaze::LowerMatrix;
   using blaze::rowMajor;

   // Default constructed, default initialized, row-major 3x3 lower dynamic matrix
   LowerMatrix< DynamicMatrix<double,rowMajor> > A( 3 );

   // Resizing the matrix to 5x5
   A.resize( 5 );

   // Extending the number of rows and columns by 2, resulting in a 7x7 matrix
   A.extend( 2 );
   \endcode

// In case a matrix with a fixed size is used (as for instance blaze::StaticMatrix), the number
// of rows and number of columns must be specified equally:

   \code
   using blaze::StaticMatrix;
   using blaze::LowerMatrix;
   using blaze::columnMajor;

   // Correct setup of a fixed size column-major 3x3 lower static matrix
   LowerMatrix< StaticMatrix<int,3UL,3UL,columnMajor> > A;

   // Compilation error: the provided matrix type is not a square matrix type
   LowerMatrix< StaticMatrix<int,3UL,4UL,columnMajor> > B;
   \endcode

// \n \subsection adaptors_triangular_matrices_triangular The Triangular Property is Always Enforced!
//
// This means that it is only allowed to modify elements in the lower part or the diagonal of
// a lower triangular matrix and in the upper part or the diagonal of an upper triangular matrix.
// Unitriangular and strictly triangular matrices are even more restrictive and don't allow the // modification of diagonal elements. Also, triangular matrices can only be assigned matrices that // don't violate their triangular property. The following example demonstrates this restriction // by means of the blaze::LowerMatrix adaptor. For examples with other triangular matrix types // see the according class documentations. \code using blaze::CompressedMatrix; using blaze::DynamicMatrix; using blaze::StaticMatrix; using blaze::LowerMatrix; using blaze::rowMajor; typedef LowerMatrix< CompressedMatrix<double,rowMajor> > CompressedLower; // Default constructed, row-major 3x3 lower compressed matrix CompressedLower A( 3 ); // Initializing elements via the function call operator A(0,0) = 1.0; // Initialization of the diagonal element (0,0) A(2,0) = 2.0; // Initialization of the lower element (2,0) A(1,2) = 9.0; // Throws an exception; invalid modification of upper element // Inserting two more elements via the insert() function A.insert( 1, 0, 3.0 ); // Inserting the lower element (1,0) A.insert( 2, 1, 4.0 ); // Inserting the lower element (2,1) A.insert( 0, 2, 9.0 ); // Throws an exception; invalid insertion of upper element // Appending an element via the append() function A.reserve( 1, 3 ); // Reserving enough capacity in row 1 A.append( 1, 1, 5.0 ); // Appending the diagonal element (1,1) A.append( 1, 2, 9.0 ); // Throws an exception; appending an element in the upper part // Access via a non-const iterator CompressedLower::Iterator it = A.begin(1); *it = 6.0; // Modifies the lower element (1,0) ++it; *it = 9.0; // Modifies the diagonal element (1,1) // Erasing elements via the erase() function A.erase( 0, 0 ); // Erasing the diagonal element (0,0) A.erase( 2, 0 ); // Erasing the lower element (2,0) // Construction from a lower dense matrix StaticMatrix<double,3UL,3UL> B{ { 3.0, 0.0, 0.0 }, { 8.0, 0.0, 0.0 }, { -2.0, -1.0, 4.0 } }; LowerMatrix< DynamicMatrix<double,rowMajor> > C( B ); // OK // Assignment of a non-lower dense matrix StaticMatrix<double,3UL,3UL> D{ { 3.0, 0.0, -2.0 }, { 8.0, 0.0, 0.0 }, { -2.0, -1.0, 4.0 } }; C = D; // Throws an exception; lower matrix invariant would be violated! \endcode // The triangular property is also enforced during the construction of triangular custom matrices: // In case the given array of elements does not represent the according triangular matrix type, a // \c std::invalid_argument exception is thrown: \code using blaze::CustomMatrix; using blaze::LowerMatrix; using blaze::unaligned; using blaze::unpadded; using blaze::rowMajor; typedef LowerMatrix< CustomMatrix<double,unaligned,unpadded,rowMajor> > CustomLower; // Creating a 3x3 lower custom matrix from a properly initialized array double array[9] = { 1.0, 0.0, 0.0, 2.0, 3.0, 0.0, 4.0, 5.0, 6.0 }; CustomLower A( array, 3UL ); // OK // Attempt to create a second 3x3 lower custom matrix from an uninitialized array CustomLower B( new double[9UL], 3UL, blaze::ArrayDelete() ); // Throws an exception \endcode // Finally, the triangular matrix property is enforced for views (rows, columns, submatrices, ...) // on the triangular matrix. The following example demonstrates that modifying the elements of an // entire row and submatrix of a lower matrix only affects the lower and diagonal matrix elements. // Again, this example uses blaze::LowerMatrix, for examples with other triangular matrix types // see the according class documentations. 
   \code
   using blaze::DynamicMatrix;
   using blaze::LowerMatrix;

   // Setup of the lower matrix
   //
   //       ( 0 0 0 0 )
   //   A = ( 1 2 0 0 )
   //       ( 0 3 0 0 )
   //       ( 4 0 5 0 )
   //
   LowerMatrix< DynamicMatrix<int> > A( 4 );
   A(1,0) = 1;
   A(1,1) = 2;
   A(2,1) = 3;
   A(3,0) = 4;
   A(3,2) = 5;

   // Setting the lower and diagonal elements in the 2nd row to 9 results in the matrix
   //
   //       ( 0 0 0 0 )
   //   A = ( 1 2 0 0 )
   //       ( 9 9 9 0 )
   //       ( 4 0 5 0 )
   //
   row( A, 2 ) = 9;

   // Setting the lower and diagonal elements in the 1st and 2nd column to 7 results in
   //
   //       ( 0 0 0 0 )
   //   A = ( 1 7 0 0 )
   //       ( 9 7 7 0 )
   //       ( 4 7 7 0 )
   //
   submatrix( A, 0, 1, 4, 2 ) = 7;
   \endcode

// The next example demonstrates the (compound) assignment to rows/columns and submatrices of
// triangular matrices. Since only lower/upper and potentially diagonal elements may be modified
// the matrix to be assigned must be structured such that the triangular matrix invariant of the
// matrix is preserved. Otherwise a \c std::invalid_argument exception is thrown:

   \code
   using blaze::DynamicMatrix;
   using blaze::DynamicVector;
   using blaze::LowerMatrix;
   using blaze::rowVector;

   // Setup of two default 4x4 lower matrices
   LowerMatrix< DynamicMatrix<int> > A1( 4 ), A2( 4 );

   // Setup of a 4-dimensional vector
   //
   //   v = ( 1 2 3 0 )
   //
   DynamicVector<int,rowVector> v{ 1, 2, 3, 0 };

   // OK: Assigning v to the 2nd row of A1 preserves the lower matrix invariant
   //
   //        ( 0 0 0 0 )
   //   A1 = ( 0 0 0 0 )
   //        ( 1 2 3 0 )
   //        ( 0 0 0 0 )
   //
   row( A1, 2 ) = v;  // OK

   // Error: Assigning v to the 1st row of A1 violates the lower matrix invariant! The element
   //   marked with X cannot be assigned and triggers an exception.
   //
   //        ( 0 0 0 0 )
   //   A1 = ( 1 2 X 0 )
   //        ( 1 2 3 0 )
   //        ( 0 0 0 0 )
   //
   row( A1, 1 ) = v;  // Assignment throws an exception!

   // Setup of the 3x2 dynamic matrix
   //
   //       ( 0 0 )
   //   B = ( 7 0 )
   //       ( 8 9 )
   //
   DynamicMatrix<int> B( 3UL, 2UL, 0 );
   B(1,0) = 7;
   B(2,0) = 8;
   B(2,1) = 9;

   // OK: Assigning B to a submatrix of A2 such that the lower matrix invariant can be preserved
   //
   //        ( 0 0 0 0 )
   //   A2 = ( 0 7 0 0 )
   //        ( 0 8 9 0 )
   //        ( 0 0 0 0 )
   //
   submatrix( A2, 0UL, 1UL, 3UL, 2UL ) = B;  // OK

   // Error: Assigning B to a submatrix of A2 such that the lower matrix invariant cannot be
   //   preserved! The elements marked with X cannot be assigned without violating the invariant!
   //
   //        ( 0 0 0 0 )
   //   A2 = ( 0 7 X 0 )
   //        ( 0 8 8 X )
   //        ( 0 0 0 0 )
   //
   submatrix( A2, 0UL, 2UL, 3UL, 2UL ) = B;  // Assignment throws an exception!
   \endcode

// \n \subsection adaptors_triangular_matrices_initialization The Elements of a Dense Triangular Matrix are Always Default Initialized!
//
// Although this results in a small loss of efficiency during the creation of a dense lower or
// upper matrix, this initialization is important since otherwise the lower/upper matrix property
// of dense lower/upper matrices would not be guaranteed:

   \code
   using blaze::DynamicMatrix;
   using blaze::LowerMatrix;
   using blaze::UpperMatrix;
   using blaze::DiagonalMatrix;
   using blaze::rowMajor;
   using blaze::columnMajor;

   // Uninitialized, 5x5 row-major dynamic matrix
   DynamicMatrix<int,rowMajor> A( 5, 5 );

   // 5x5 row-major lower dynamic matrix with default initialized upper matrix
   LowerMatrix< DynamicMatrix<int,rowMajor> > B( 5 );

   // 7x7 column-major upper dynamic matrix with default initialized lower matrix
   UpperMatrix< DynamicMatrix<int,columnMajor> > C( 7 );

   // 3x3 row-major diagonal dynamic matrix with default initialized lower and upper matrix
   DiagonalMatrix< DynamicMatrix<int,rowMajor> > D( 3 );
   \endcode

// \n \subsection adaptors_triangular_matrices_storage Dense Triangular Matrices Store All Elements!
//
// All dense triangular matrices store all \f$ N \times N \f$ elements, including the immutable
// elements in the lower or upper part, respectively. Therefore dense triangular matrices don't
// provide any kind of memory reduction! There are two main reasons for this: First, also storing
// the zero elements guarantees maximum performance for many algorithms that perform vectorized
// operations on the triangular matrices, which is especially true for small dense matrices.
// Second, conceptually all triangular adaptors merely restrict the interface to the matrix type
// \c MT and do not change the data layout or the underlying matrix type.
//
// This property matters most for diagonal matrices. In order to achieve the perfect combination
// of performance and memory consumption for a diagonal matrix it is recommended to use dense
// matrices for small diagonal matrices and sparse matrices for large diagonal matrices:

   \code
   // Recommendation 1: use dense matrices for small diagonal matrices
   typedef blaze::DiagonalMatrix< blaze::StaticMatrix<float,3UL,3UL> >  SmallDiagonalMatrix;

   // Recommendation 2: use sparse matrices for large diagonal matrices
   typedef blaze::DiagonalMatrix< blaze::CompressedMatrix<float> >  LargeDiagonalMatrix;
   \endcode

// \n \subsection adaptors_triangular_matrices_scaling Unitriangular Matrices Cannot Be Scaled!
//
// Since the diagonal elements of a unitriangular matrix have a fixed value of 1 it is not possible
// to self-scale such a matrix:

   \code
   using blaze::DynamicMatrix;
   using blaze::UniLowerMatrix;

   UniLowerMatrix< DynamicMatrix<int> > A( 4 );

   A *= 2;        // Compilation error; Scale operation is not available on a unilower matrix
   A /= 2;        // Compilation error; Scale operation is not available on a unilower matrix
   A.scale( 2 );  // Compilation error; Scale function is not available on a unilower matrix

   A = A * 2;  // Throws an exception; Invalid assignment of non-unilower matrix
   A = A / 2;  // Throws an exception; Invalid assignment of non-unilower matrix
   \endcode

// \n \section adaptors_triangular_matrices_arithmetic_operations Arithmetic Operations
// <hr>
//
// Lower and upper triangular matrices can be used within all numerical operations in any way
// any other dense or sparse matrix can be used. They can also be combined with any other dense
// or sparse vector or matrix.
// The following code example gives an impression of the use of blaze::LowerMatrix and
// blaze::UpperMatrix within arithmetic operations:

   \code
   using blaze::LowerMatrix;
   using blaze::UpperMatrix;
   using blaze::DynamicMatrix;
   using blaze::HybridMatrix;
   using blaze::StaticMatrix;
   using blaze::CompressedMatrix;
   using blaze::rowMajor;
   using blaze::columnMajor;

   DynamicMatrix<double,rowMajor> A( 3, 3 );
   CompressedMatrix<double,rowMajor> B( 3, 3 );

   LowerMatrix< DynamicMatrix<double,rowMajor> > C( 3 );
   UpperMatrix< CompressedMatrix<double,rowMajor> > D( 3 );

   LowerMatrix< HybridMatrix<float,3UL,3UL,rowMajor> > E;
   UpperMatrix< StaticMatrix<float,3UL,3UL,columnMajor> > F;

   E = A + B;     // Matrix addition and assignment to a row-major lower matrix
   F = C - D;     // Matrix subtraction and assignment to a column-major upper matrix
   F = A * D;     // Matrix multiplication between a dense and a sparse matrix

   C *= 2.0;      // In-place scaling of matrix C
   E  = 2.0 * B;  // Scaling of matrix B
   F  = C * 2.0;  // Scaling of matrix C

   E += A - B;    // Addition assignment
   F -= C + D;    // Subtraction assignment
   F *= A * D;    // Multiplication assignment
   \endcode

// Note that diagonal, unitriangular and strictly triangular matrix types can be used in the same
// way, but may pose some additional restrictions (see the according class documentations).
//
//
// \n \section adaptors_triangular_matrices_block_matrices Triangular Block Matrices
// <hr>
//
// It is also possible to use triangular block matrices:

   \code
   using blaze::CompressedMatrix;
   using blaze::DynamicMatrix;
   using blaze::StaticMatrix;
   using blaze::LowerMatrix;
   using blaze::UpperMatrix;

   // Definition of a 5x5 lower block matrix based on DynamicMatrix
   LowerMatrix< DynamicMatrix< StaticMatrix<int,3UL,3UL> > > A( 5 );

   // Definition of a 7x7 upper block matrix based on CompressedMatrix
   UpperMatrix< CompressedMatrix< StaticMatrix<int,3UL,3UL> > > B( 7 );
   \endcode

// Also in this case the triangular matrix invariant is enforced, i.e. it is not possible to
// manipulate elements in the upper part (lower triangular matrix) or the lower part (upper
// triangular matrix) of the matrix:

   \code
   const StaticMatrix<int,3UL,3UL> C{ { 1, -4,  5 },
                                      { 6,  8, -3 },
                                      { 2, -1,  2 } };

   A(2,4)(1,1) = -5;     // Invalid manipulation of upper matrix element; Results in an exception
   B.insert( 4, 2, C );  // Invalid insertion of element (4,2); Results in an exception
   \endcode

// Note that unitriangular matrices are restricted to numeric element types and therefore cannot
// be used for block matrices:

   \code
   using blaze::CompressedMatrix;
   using blaze::DynamicMatrix;
   using blaze::StaticMatrix;
   using blaze::UniLowerMatrix;
   using blaze::UniUpperMatrix;

   // Compilation error: lower unitriangular matrices are restricted to numeric element types
   UniLowerMatrix< DynamicMatrix< StaticMatrix<int,3UL,3UL> > > A( 5 );

   // Compilation error: upper unitriangular matrices are restricted to numeric element types
   UniUpperMatrix< CompressedMatrix< StaticMatrix<int,3UL,3UL> > > B( 7 );
   \endcode

// For more information on block matrices, see the tutorial on \ref block_vectors_and_matrices.
//
//
// \n \section adaptors_triangular_matrices_performance Performance Considerations
// <hr>
//
// The \b Blaze library tries to exploit the properties of lower and upper triangular matrices
// whenever and wherever possible. Therefore using triangular matrices instead of general
// matrices can result in a considerable performance improvement. However, there are also
// situations when using a triangular matrix introduces some overhead.
// The following examples demonstrate several common situations where triangular matrices can
// positively or negatively impact performance.
//
// \n \subsection adaptors_triangular_matrices_matrix_matrix_multiplication Positive Impact: Matrix/Matrix Multiplication
//
// When multiplying two matrices, at least one of which is triangular, \b Blaze can exploit the
// fact that either the lower or upper part of the matrix contains only default elements and
// restrict the algorithm to the non-zero elements. The following example demonstrates this by
// means of a dense matrix/dense matrix multiplication with lower triangular matrices:

   \code
   using blaze::DynamicMatrix;
   using blaze::LowerMatrix;
   using blaze::rowMajor;
   using blaze::columnMajor;

   LowerMatrix< DynamicMatrix<double,rowMajor> > A;
   LowerMatrix< DynamicMatrix<double,columnMajor> > B;
   DynamicMatrix<double,columnMajor> C;

   // ... Resizing and initialization

   C = A * B;
   \endcode

// In comparison to a general matrix multiplication, the performance advantage is significant,
// especially for large matrices. Therefore it is highly recommended to use the blaze::LowerMatrix
// and blaze::UpperMatrix adaptors when a matrix is known to be lower or upper triangular,
// respectively. Note however that the performance advantage is most pronounced for dense matrices
// and much less so for sparse matrices.
//
// \n \subsection adaptors_triangular_matrices_matrix_vector_multiplication Positive Impact: Matrix/Vector Multiplication
//
// A similar performance improvement can be gained when using a triangular matrix in a matrix/vector
// multiplication:

   \code
   using blaze::DynamicMatrix;
   using blaze::DynamicVector;
   using blaze::LowerMatrix;
   using blaze::rowMajor;
   using blaze::columnVector;

   LowerMatrix< DynamicMatrix<double,rowMajor> > A;
   DynamicVector<double,columnVector> x, y;

   // ... Resizing and initialization

   y = A * x;
   \endcode

// In this example, \b Blaze also exploits the structure of the matrix and approx. halves the
// runtime of the multiplication. Also in case of matrix/vector multiplications the performance
// improvement is most pronounced for dense matrices and much less so for sparse matrices.
//
// \n \subsection adaptors_triangular_matrices_assignment Negative Impact: Assignment of a General Matrix
//
// In contrast to using a triangular matrix on the right-hand side of an assignment (i.e. for
// read access), which introduces absolutely no performance penalty, using a triangular matrix
// on the left-hand side of an assignment (i.e. for write access) may introduce additional
// overhead when it is assigned a general matrix, which is not triangular at compile time:

   \code
   using blaze::DynamicMatrix;
   using blaze::LowerMatrix;

   LowerMatrix< DynamicMatrix<double> > A, C;
   DynamicMatrix<double> B;

   B = A;  // Only read-access to the lower matrix; no performance penalty
   C = A;  // Assignment of a lower matrix to another lower matrix; no runtime overhead
   C = B;  // Assignment of a general matrix to a lower matrix; some runtime overhead
   \endcode

// When assigning a general (potentially not lower triangular) matrix to a lower matrix or a
// general (potentially not upper triangular) matrix to an upper matrix it is necessary to check
// whether the matrix is lower or upper at runtime in order to guarantee the triangular property
// of the matrix. In case it turns out to be lower or upper, respectively, it is assigned as
// efficiently as possible; if it is not, an exception is thrown.
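// As in the Hermitian case above, the exception can be avoided by querying the property up
// front, e.g. via \c isLower(). The following sketch is for illustration only; the assignment
// itself still performs its internal runtime check:

   \code
   using blaze::DynamicMatrix;
   using blaze::LowerMatrix;

   LowerMatrix< DynamicMatrix<double> > A;
   DynamicMatrix<double> B;

   // ... Resizing and initialization

   if( isLower( B ) ) {
      A = B;  // Guaranteed not to throw
   }
   \endcode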
// In order to prevent this runtime overhead it is therefore generally advisable to assign lower
// or upper triangular matrices to other lower or upper triangular matrices.\n
// In this context it is especially noteworthy that the addition, subtraction, and multiplication
// of two triangular matrices of the same structure always results in another triangular matrix:

   \code
   LowerMatrix< DynamicMatrix<double> > A, B, C;

   C = A + B;  // Results in a lower matrix; no runtime overhead
   C = A - B;  // Results in a lower matrix; no runtime overhead
   C = A * B;  // Results in a lower matrix; no runtime overhead
   \endcode

   \code
   UpperMatrix< DynamicMatrix<double> > A, B, C;

   C = A + B;  // Results in an upper matrix; no runtime overhead
   C = A - B;  // Results in an upper matrix; no runtime overhead
   C = A * B;  // Results in an upper matrix; no runtime overhead
   \endcode

// \n Previous: \ref adaptors_hermitian_matrices &nbsp; &nbsp; Next: \ref views
*/
//*************************************************************************************************


//**Views******************************************************************************************
/*!\page views Views
//
// \tableofcontents
//
//
// \section views_general General Concepts
// <hr>
//
// Views represent parts of a vector or matrix, such as a subvector, a submatrix, or a specific
// row or column of a matrix. As such, views act as a reference to a specific part of a vector
// or matrix. This reference is valid and can be used in every way any other vector or matrix
// can be used as long as the referenced vector or matrix is not resized or entirely destroyed.
// Views also act as aliases to the elements of the vector or matrix: Changes made to the elements
// (e.g. modifying values, inserting or erasing elements) via the view are immediately visible in
// the vector or matrix and changes made via the vector or matrix are immediately visible in the
// view.
//
// The \b Blaze library provides the following views on vectors and matrices:
//
// Vector views:
//  - \ref views_subvectors
//
// Matrix views:
//  - \ref views_submatrices
//  - \ref views_rows
//  - \ref views_columns
//
//
// \n \section views_examples Examples

   \code
   using blaze::DynamicMatrix;
   using blaze::StaticVector;
   using blaze::rowVector;

   // Setup of the 3x5 row-major matrix
   //
   //   ( 1  0 -2  3  0 )
   //   ( 0  2  5 -1 -1 )
   //   ( 1  0  0  2  1 )
   //
   DynamicMatrix<int> A{ { 1, 0, -2,  3,  0 },
                         { 0, 2,  5, -1, -1 },
                         { 1, 0,  0,  2,  1 } };

   // Setup of the 2-dimensional row vector
   //
   //   ( 18 19 )
   //
   StaticVector<int,2UL,rowVector> vec{ 18, 19 };

   // Assigning to the elements (1,2) and (1,3) via a subvector of a row
   //
   //   ( 1  0 -2  3  0 )
   //   ( 0  2 18 19 -1 )
   //   ( 1  0  0  2  1 )
   //
   subvector( row( A, 1UL ), 2UL, 2UL ) = vec;
   \endcode

// \n Previous: \ref adaptors_triangular_matrices &nbsp; &nbsp; Next: \ref views_subvectors
*/
//*************************************************************************************************


//**Subvectors*************************************************************************************
/*!\page views_subvectors Subvectors
//
// \tableofcontents
//
//
// Subvectors provide views on a specific part of a dense or sparse vector. As such, subvectors
// act as a reference to a specific range within a vector. This reference is valid and can be
// used in every way any other dense or sparse vector can be used as long as the vector containing
// the subvector is not resized or entirely destroyed. The subvector also acts as an alias to the
// vector elements in the specified range: Changes made to the elements (e.g.
modifying values, // inserting or erasing elements) are immediately visible in the vector and changes made via the // vector are immediately visible in the subvector. // // // \n \section views_subvectors_class The Subvector Class Template // <hr> // // The blaze::Subvector class template represents a view on a specific subvector of a dense or // sparse vector primitive. It can be included via the header file \code #include <blaze/math/Subvector.h> \endcode // The type of the vector is specified via two template parameters: \code template< typename VT, bool AF > class Subvector; \endcode // - \c VT: specifies the type of the vector primitive. Subvector can be used with every vector // primitive or view, but does not work with any vector expression type. // - \c AF: the alignment flag specifies whether the subvector is aligned (blaze::aligned) or // unaligned (blaze::unaligned). The default value is blaze::unaligned. // // // \n \section views_subvectors_setup Setup of Subvectors // <hr> // // A view on a dense or sparse subvector can be created very conveniently via the \c subvector() // function. This view can be treated as any other vector, i.e. it can be assigned to, it can // be copied from, and it can be used in arithmetic operations. A subvector created from a row // vector can be used as any other row vector, a subvector created from a column vector can be // used as any other column vector. The view can also be used on both sides of an assignment: // The subvector can either be used as an alias to grant write access to a specific subvector // of a vector primitive on the left-hand side of an assignment or to grant read-access to a // specific subvector of a vector primitive or expression on the right-hand side of an assignment. // The following example demonstrates this in detail: \code typedef blaze::DynamicVector<double,blaze::rowVector> DenseVectorType; typedef blaze::CompressedVector<int,blaze::rowVector> SparseVectorType; DenseVectorType d1, d2; SparseVectorType s1, s2; // ... Resizing and initialization // Creating a view on the first ten elements of the dense vector d1 blaze::Subvector<DenseVectorType> dsv = subvector( d1, 0UL, 10UL ); // Creating a view on the second ten elements of the sparse vector s1 blaze::Subvector<SparseVectorType> ssv = subvector( s1, 10UL, 10UL ); // Creating a view on the addition of d2 and s2 dsv = subvector( d2 + s2, 5UL, 10UL ); // Creating a view on the multiplication of d2 and s2 ssv = subvector( d2 * s2, 2UL, 10UL ); \endcode // The \c subvector() function can be used on any dense or sparse vector, including expressions, // as demonstrated in the example. Note however that a blaze::Subvector can only be instantiated // with a dense or sparse vector primitive, i.e. with types that can be written, and not with an // expression type. // // // \n \section views_subvectors_common_operations Common Operations // <hr> // // A subvector view can be used like any other dense or sparse vector. For instance, the current // number of elements can be obtained via the \c size() function, the current capacity via the // \c capacity() function, and the number of non-zero elements via the \c nonZeros() function. // However, since subvectors are references to a specific range of a vector, several operations // are not possible on views, such as resizing and swapping. 
// The following example shows this by means of a dense subvector view:

   \code
   typedef blaze::DynamicVector<int,blaze::rowVector>  VectorType;
   typedef blaze::Subvector<VectorType>                SubvectorType;

   VectorType v( 42UL );
   // ... Resizing and initialization

   // Creating a view on the range [5..14] of vector v
   SubvectorType sv = subvector( v, 5UL, 10UL );

   sv.size();          // Returns the number of elements in the subvector
   sv.capacity();      // Returns the capacity of the subvector
   sv.nonZeros();      // Returns the number of non-zero elements contained in the subvector

   sv.resize( 84UL );  // Compilation error: Cannot resize a subvector of a vector

   SubvectorType sv2 = subvector( v, 15UL, 10UL );
   swap( sv, sv2 );    // Compilation error: Swap operation not allowed
   \endcode

// \n \section views_subvectors_element_access Element Access
// <hr>
//
// The elements of a subvector can be directly accessed via the subscript operator:

   \code
   typedef blaze::DynamicVector<double,blaze::rowVector>  VectorType;
   VectorType v;
   // ... Resizing and initialization

   // Creating an 8-dimensional subvector, starting from index 4
   blaze::Subvector<VectorType> sv = subvector( v, 4UL, 8UL );

   // Setting the 1st element of the subvector, which corresponds to
   // the element at index 5 in vector v
   sv[1] = 2.0;
   \endcode

   \code
   typedef blaze::CompressedVector<double,blaze::rowVector>  VectorType;
   VectorType v;
   // ... Resizing and initialization

   // Creating an 8-dimensional subvector, starting from index 4
   blaze::Subvector<VectorType> sv = subvector( v, 4UL, 8UL );

   // Setting the 1st element of the subvector, which corresponds to
   // the element at index 5 in vector v
   sv[1] = 2.0;
   \endcode

// The numbering of the subvector elements is

                             \f[\left(\begin{array}{*{5}{c}}
                             0 & 1 & 2 & \cdots & N-1 \\
                             \end{array}\right),\f]

// where N is the specified size of the subvector. Alternatively, the elements of a subvector can
// be traversed via iterators. Just as with vectors, in case of non-const subvectors, \c begin()
// and \c end() return an Iterator, which allows a manipulation of the non-zero values, in case
// of constant subvectors a ConstIterator is returned:

   \code
   typedef blaze::DynamicVector<int,blaze::rowVector>  VectorType;
   typedef blaze::Subvector<VectorType>                SubvectorType;

   VectorType v( 256UL );
   // ... Resizing and initialization

   // Creating a reference to a specific subvector of the dense vector v
   SubvectorType sv = subvector( v, 16UL, 64UL );

   for( SubvectorType::Iterator it=sv.begin(); it!=sv.end(); ++it ) {
      *it = ...;  // OK: Write access to the dense subvector value.
      ... = *it;  // OK: Read access to the dense subvector value.
   }

   for( SubvectorType::ConstIterator it=sv.begin(); it!=sv.end(); ++it ) {
      *it = ...;  // Compilation error: Assignment to the value via a ConstIterator is invalid.
      ... = *it;  // OK: Read access to the dense subvector value.
   }
   \endcode

   \code
   typedef blaze::CompressedVector<int,blaze::rowVector>  VectorType;
   typedef blaze::Subvector<VectorType>                   SubvectorType;

   VectorType v( 256UL );
   // ... Resizing and initialization

   // Creating a reference to a specific subvector of the sparse vector v
   SubvectorType sv = subvector( v, 16UL, 64UL );

   for( SubvectorType::Iterator it=sv.begin(); it!=sv.end(); ++it ) {
      it->value() = ...;  // OK: Write access to the value of the non-zero element.
      ... = it->value();  // OK: Read access to the value of the non-zero element.
      it->index() = ...;  // Compilation error: The index of a non-zero element cannot be changed.
      ... = it->index();  // OK: Read access to the index of the sparse element.
   }

   for( SubvectorType::ConstIterator it=sv.begin(); it!=sv.end(); ++it ) {
      it->value() = ...;  // Compilation error: Assignment to the value via a ConstIterator is invalid.
      ... = it->value();  // OK: Read access to the value of the non-zero element.
      it->index() = ...;  // Compilation error: The index of a non-zero element cannot be changed.
      ... = it->index();  // OK: Read access to the index of the sparse element.
   }
   \endcode

// \n \section views_subvectors_element_insertion Element Insertion
// <hr>
//
// Inserting/accessing elements in a sparse subvector can be done by several alternative functions.
// The following example demonstrates all options:

   \code
   typedef blaze::CompressedVector<double,blaze::rowVector>  VectorType;
   VectorType v( 256UL );  // Non-initialized vector of size 256

   typedef blaze::Subvector<VectorType>  SubvectorType;
   SubvectorType sv( subvector( v, 10UL, 60UL ) );  // View on the range [10..69] of v

   // The subscript operator provides access to all possible elements of the sparse subvector,
   // including the zero elements. In case the subscript operator is used to access an element
   // that is currently not stored in the sparse subvector, the element is inserted into the
   // subvector.
   sv[42] = 2.0;

   // The second operation for inserting elements is the set() function. In case the element is
   // not contained in the vector, it is inserted into the vector; if it is already contained in
   // the vector, its value is modified.
   sv.set( 45UL, -1.2 );

   // An alternative for inserting elements into the subvector is the insert() function. However,
   // it inserts the element only in case the element is not already contained in the subvector.
   sv.insert( 50UL, 3.7 );

   // Just as in case of vectors, elements can also be inserted via the append() function. In
   // case of subvectors, append() also requires that the appended element's index is strictly
   // larger than the currently largest non-zero index of the subvector and that the subvector's
   // capacity is large enough to hold the new element. Note however that due to the nature of
   // a subvector, which may be an alias to the middle of a sparse vector, the append() function
   // does not work as efficiently for a subvector as it does for a vector.
   sv.reserve( 10UL );
   sv.append( 51UL, -2.1 );
   \endcode

// \n \section views_subvectors_arithmetic_operations Arithmetic Operations
// <hr>
//
// Both dense and sparse subvectors can be used in all arithmetic operations that any other dense
// or sparse vector can be used in. The following example gives an impression of the use of dense
// subvectors within arithmetic operations. All operations (addition, subtraction, multiplication,
// scaling, ...) can be performed on all possible combinations of dense and sparse subvectors with
// fitting element types:

   \code
   typedef blaze::DynamicVector<double,blaze::rowVector>     DenseVectorType;
   typedef blaze::CompressedVector<double,blaze::rowVector>  SparseVectorType;

   DenseVectorType d1, d2, d3;
   SparseVectorType s1, s2;

   // ... Resizing and initialization
   typedef blaze::DynamicMatrix<double,blaze::rowMajor>  DenseMatrixType;
   DenseMatrixType A;

   typedef blaze::Subvector<DenseVectorType>  SubvectorType;
   SubvectorType dsv( subvector( d1, 0UL, 10UL ) );  // View on the range [0..9] of vector d1

   dsv = d2;                          // Dense vector initialization of the range [0..9]
   subvector( d1, 10UL, 10UL ) = s1;  // Sparse vector initialization of the range [10..19]

   d3 = dsv + d2;                           // Dense vector/dense vector addition
   s2 = s1 + subvector( d1, 10UL, 10UL );   // Sparse vector/dense vector addition
   d2 = dsv * subvector( d1, 20UL, 10UL );  // Component-wise vector multiplication

   subvector( d1, 3UL, 4UL ) *= 2.0;      // In-place scaling of the range [3..6]
   d2 = subvector( d1, 7UL, 3UL ) * 2.0;  // Scaling of the range [7..9]
   d2 = 2.0 * subvector( d1, 7UL, 3UL );  // Scaling of the range [7..9]

   subvector( d1, 0UL , 10UL ) += d2;   // Addition assignment
   subvector( d1, 10UL, 10UL ) -= s2;   // Subtraction assignment
   subvector( d1, 20UL, 10UL ) *= dsv;  // Multiplication assignment

   double scalar = subvector( d1, 5UL, 10UL ) * trans( s1 );  // Scalar/dot/inner product between two vectors

   A = trans( s1 ) * subvector( d1, 4UL, 16UL );  // Outer product between two vectors
   \endcode

// \n \section views_aligned_subvectors Aligned Subvectors
// <hr>
//
// Usually subvectors can be defined anywhere within a vector. They may start at any position and
// may have an arbitrary size (only restricted by the size of the underlying vector). However, in
// contrast to vectors themselves, which are always properly aligned in memory and therefore can
// provide maximum performance, this means that subvectors in general have to be considered to be
// unaligned. This can be made explicit by the blaze::unaligned flag:

   \code
   using blaze::unaligned;

   typedef blaze::DynamicVector<double,blaze::rowVector>  DenseVectorType;

   DenseVectorType x;
   // ... Resizing and initialization

   // Identical creations of an unaligned subvector in the range [8..23]
   blaze::Subvector<DenseVectorType>           sv1 = subvector           ( x, 8UL, 16UL );
   blaze::Subvector<DenseVectorType>           sv2 = subvector<unaligned>( x, 8UL, 16UL );
   blaze::Subvector<DenseVectorType,unaligned> sv3 = subvector           ( x, 8UL, 16UL );
   blaze::Subvector<DenseVectorType,unaligned> sv4 = subvector<unaligned>( x, 8UL, 16UL );
   \endcode

// All of these calls to the \c subvector() function are identical. Whether the alignment flag is
// explicitly specified or not, it always returns an unaligned subvector. While this provides full
// flexibility in the creation of subvectors, it might result in performance disadvantages in
// comparison to vector primitives (even in case the specified subvector could be aligned).
// Whereas vector primitives are guaranteed to be properly aligned and therefore provide maximum
// performance in all operations, a general view on a vector might not be properly aligned. This
// may cause a performance penalty on some platforms and/or for some operations.
//
// However, it is also possible to create aligned subvectors. Aligned subvectors are identical to
// unaligned subvectors in all aspects, except that they may pose additional alignment restrictions
// and therefore have less flexibility during creation, but don't suffer from performance penalties
// and provide the same performance as the underlying vector.
Aligned subvectors are created by // explicitly specifying the blaze::aligned flag: \code using blaze::aligned; // Creating an aligned dense subvector in the range [8..23] blaze::Subvector<DenseVectorType,aligned> sv = subvector<aligned>( x, 8UL, 16UL ); \endcode // The alignment restrictions refer to system dependent address restrictions for the used element // type and the available vectorization mode (SSE, AVX, ...). In order to be properly aligned the // first element of the subvector must be aligned. The following source code gives some examples // for a double precision dynamic vector, assuming that AVX is available, which packs 4 \c double // values into a SIMD vector: \code using blaze::aligned; using blaze::columnVector; typedef blaze::DynamicVector<double,columnVector> VectorType; typedef blaze::Subvector<VectorType,aligned> SubvectorType; VectorType d( 17UL ); // ... Resizing and initialization // OK: Starts at the beginning, i.e. the first element is aligned SubvectorType dsv1 = subvector<aligned>( d, 0UL, 13UL ); // OK: Start index is a multiple of 4, i.e. the first element is aligned SubvectorType dsv2 = subvector<aligned>( d, 4UL, 7UL ); // OK: The start index is a multiple of 4 and the subvector includes the last element SubvectorType dsv3 = subvector<aligned>( d, 8UL, 9UL ); // Error: Start index is not a multiple of 4, i.e. the first element is not aligned SubvectorType dsv4 = subvector<aligned>( d, 5UL, 8UL ); \endcode // Note that the discussed alignment restrictions are only valid for aligned dense subvectors. // In contrast, aligned sparse subvectors at this time don't pose any additional restrictions. // Therefore aligned and unaligned sparse subvectors are truly fully identical. Still, in case // the blaze::aligned flag is specified during setup, an aligned subvector is created: \code using blaze::aligned; typedef blaze::CompressedVector<double,blaze::rowVector> SparseVectorType; SparseVectorType x; // ... Resizing and initialization // Creating an aligned subvector in the range [8..23] blaze::Subvector<SparseVectorType,aligned> sv = subvector<aligned>( x, 8UL, 16UL ); \endcode // \n \section views_subvectors_on_subvectors Subvectors on Subvectors // <hr> // // It is also possible to create a subvector view on another subvector. In this context it is // important to remember that the type returned by the \c subvector() function is the same type // as the type of the given subvector, not a nested subvector type, since the view on a subvector // is just another view on the underlying vector: \code typedef blaze::DynamicVector<double,blaze::rowVector> VectorType; typedef blaze::Subvector<VectorType> SubvectorType; VectorType d1; // ... Resizing and initialization // Creating a subvector view on the dense vector d1 SubvectorType sv1 = subvector( d1, 5UL, 10UL ); // Creating a subvector view on the dense subvector sv1 SubvectorType sv2 = subvector( sv1, 1UL, 5UL ); \endcode // \n Previous: \ref views &nbsp; &nbsp; Next: \ref views_submatrices */ //************************************************************************************************* //**Submatrices************************************************************************************ /*!\page views_submatrices Submatrices // // \tableofcontents // // // Submatrices provide views on a specific part of a dense or sparse matrix just as subvectors // provide views on specific parts of vectors. As such, submatrices act as a reference to a // specific block within a matrix. 
This reference is valid and can be used in every way any // other dense or sparse matrix can be used as long as the matrix containing the submatrix is // not resized or entirely destroyed. The submatrix also acts as an alias to the matrix elements // in the specified block: Changes made to the elements (e.g. modifying values, inserting or // erasing elements) are immediately visible in the matrix and changes made via the matrix are // immediately visible in the submatrix. // // // \n \section views_submatrices_class The Submatrix Class Template // <hr> // // The blaze::Submatrix class template represents a view on a specific submatrix of a dense or // sparse matrix primitive. It can be included via the header file \code #include <blaze/math/Submatrix.h> \endcode // The type of the matrix is specified via two template parameters: \code template< typename MT, bool AF > class Submatrix; \endcode // - \c MT: specifies the type of the matrix primitive. Submatrix can be used with every matrix // primitive, but does not work with any matrix expression type. // - \c AF: the alignment flag specifies whether the submatrix is aligned (blaze::aligned) or // unaligned (blaze::unaligned). The default value is blaze::unaligned. // // // \n \section views_submatrices_setup Setup of Submatrices // <hr> // // A view on a submatrix can be created very conveniently via the \c submatrix() function. // This view can be treated as any other matrix, i.e. it can be assigned to, it can be copied // from, and it can be used in arithmetic operations. A submatrix created from a row-major // matrix will itself be a row-major matrix, a submatrix created from a column-major matrix // will be a column-major matrix. The view can also be used on both sides of an assignment: // The submatrix can either be used as an alias to grant write access to a specific submatrix // of a matrix primitive on the left-hand side of an assignment or to grant read-access to // a specific submatrix of a matrix primitive or expression on the right-hand side of an // assignment. The following example demonstrates this in detail: \code typedef blaze::DynamicMatrix<double,blaze::rowMajor> DenseMatrixType; typedef blaze::CompressedMatrix<int,blaze::columnMajor> SparseMatrixType; DenseMatrixType D1, D2; SparseMatrixType S1, S2; // ... Resizing and initialization // Creating a view on the first 8x16 block of the dense matrix D1 blaze::Submatrix<DenseMatrixType> dsm = submatrix( D1, 0UL, 0UL, 8UL, 16UL ); // Creating a view on the second 8x16 block of the sparse matrix S1 blaze::Submatrix<SparseMatrixType> ssm = submatrix( S1, 0UL, 16UL, 8UL, 16UL ); // Creating a view on the addition of D2 and S2 dsm = submatrix( D2 + S2, 5UL, 10UL, 8UL, 16UL ); // Creating a view on the multiplication of D2 and S2 ssm = submatrix( D2 * S2, 7UL, 13UL, 8UL, 16UL ); \endcode // \n \section views_submatrices_common_operations Common Operations // <hr> // // The current size of the matrix, i.e. the number of rows or columns can be obtained via the // \c rows() and \c columns() functions, the current total capacity via the \c capacity() function, // and the number of non-zero elements via the \c nonZeros() function. However, since submatrices // are views on a specific submatrix of a matrix, several operations are not possible on views, // such as resizing and swapping: \code typedef blaze::DynamicMatrix<int,blaze::rowMajor> MatrixType; typedef blaze::Submatrix<MatrixType> SubmatrixType; MatrixType A; // ...
Resizing and initialization // Creating a view on an 8x12 submatrix of matrix A SubmatrixType sm = submatrix( A, 0UL, 0UL, 8UL, 12UL ); sm.rows(); // Returns the number of rows of the submatrix sm.columns(); // Returns the number of columns of the submatrix sm.capacity(); // Returns the capacity of the submatrix sm.nonZeros(); // Returns the number of non-zero elements contained in the submatrix sm.resize( 10UL, 8UL ); // Compilation error: Cannot resize a submatrix of a matrix SubmatrixType sm2 = submatrix( A, 8UL, 0UL, 12UL, 8UL ); swap( sm, sm2 ); // Compilation error: Swap operation not allowed \endcode // \n \section views_submatrices_element_access Element Access // <hr> // // The elements of a submatrix can be directly accessed with the function call operator: \code typedef blaze::DynamicMatrix<double,blaze::rowMajor> MatrixType; MatrixType A; // ... Resizing and initialization // Creating an 8x8 submatrix, starting from position (4,4) blaze::Submatrix<MatrixType> sm = submatrix( A, 4UL, 4UL, 8UL, 8UL ); // Setting the element (0,0) of the submatrix, which corresponds to // the element at position (4,4) in matrix A sm(0,0) = 2.0; \endcode \code typedef blaze::CompressedMatrix<double,blaze::rowMajor> MatrixType; MatrixType A; // ... Resizing and initialization // Creating an 8x8 submatrix, starting from position (4,4) blaze::Submatrix<MatrixType> sm = submatrix( A, 4UL, 4UL, 8UL, 8UL ); // Setting the element (0,0) of the submatrix, which corresponds to // the element at position (4,4) in matrix A sm(0,0) = 2.0; \endcode // Alternatively, the elements of a submatrix can be traversed via (const) iterators. Just as // with matrices, in case of non-const submatrices, \c begin() and \c end() return an Iterator, // which allows a manipulation of the non-zero values, in case of constant submatrices a // ConstIterator is returned: \code typedef blaze::DynamicMatrix<int,blaze::rowMajor> MatrixType; typedef blaze::Submatrix<MatrixType> SubmatrixType; MatrixType A( 256UL, 512UL ); // ... Resizing and initialization // Creating a reference to a specific submatrix of the dense matrix A SubmatrixType sm = submatrix( A, 16UL, 16UL, 64UL, 128UL ); // Traversing the elements of the 0th row via iterators to non-const elements for( SubmatrixType::Iterator it=sm.begin(0); it!=sm.end(0); ++it ) { *it = ...; // OK: Write access to the dense submatrix value. ... = *it; // OK: Read access to the dense submatrix value. } // Traversing the elements of the 1st row via iterators to const elements for( SubmatrixType::ConstIterator it=sm.begin(1); it!=sm.end(1); ++it ) { *it = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid. ... = *it; // OK: Read access to the dense submatrix value. } \endcode \code typedef blaze::CompressedMatrix<int,blaze::rowMajor> MatrixType; typedef blaze::Submatrix<MatrixType> SubmatrixType; MatrixType A( 256UL, 512UL ); // ... Resizing and initialization // Creating a reference to a specific submatrix of the sparse matrix A SubmatrixType sm = submatrix( A, 16UL, 16UL, 64UL, 128UL ); // Traversing the elements of the 0th row via iterators to non-const elements for( SubmatrixType::Iterator it=sm.begin(0); it!=sm.end(0); ++it ) { it->value() = ...; // OK: Write access to the value of the non-zero element. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element.
} // Traversing the elements of the 1st row via iterators to const elements for( SubmatrixType::ConstIterator it=sm.begin(1); it!=sm.end(1); ++it ) { it->value() = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element. } \endcode // \n \section views_submatrices_element_insertion Element Insertion // <hr> // // Inserting/accessing elements in a sparse submatrix can be done by several alternative functions. // The following example demonstrates all options: \code typedef blaze::CompressedMatrix<double,blaze::rowMajor> MatrixType; MatrixType A( 256UL, 512UL ); // Non-initialized matrix of size 256x512 typedef blaze::Submatrix<MatrixType> SubmatrixType; SubmatrixType sm = submatrix( A, 10UL, 10UL, 16UL, 16UL ); // View on a 16x16 submatrix of A // The function call operator provides access to all possible elements of the sparse submatrix, // including the zero elements. In case the function call operator is used to access an element // that is currently not stored in the sparse submatrix, the element is inserted into the // submatrix. sm(2,4) = 2.0; // The second operation for inserting elements is the set() function. In case the element is // not contained in the submatrix it is inserted into the submatrix, if it is already contained // in the submatrix its value is modified. sm.set( 2UL, 5UL, -1.2 ); // An alternative for inserting elements into the submatrix is the insert() function. However, // it inserts the element only in case the element is not already contained in the submatrix. sm.insert( 2UL, 6UL, 3.7 ); // Just as in case of sparse matrices, elements can also be inserted via the append() function. // In case of submatrices, append() also requires that the appended element's index is strictly // larger than the currently largest non-zero index in the according row or column of the // submatrix and that the according row's or column's capacity is large enough to hold the new // element. Note however that due to the nature of a submatrix, which may be an alias to the // middle of a sparse matrix, the append() function does not work as efficiently for a // submatrix as it does for a matrix. sm.reserve( 2UL, 10UL ); sm.append( 2UL, 10UL, -2.1 ); \endcode // \n \section views_submatrices_arithmetic_operations Arithmetic Operations // <hr> // // Both dense and sparse submatrices can be used in all arithmetic operations that any other dense // or sparse matrix can be used in. The following example gives an impression of the use of dense // submatrices within arithmetic operations. All operations (addition, subtraction, multiplication, // scaling, ...) can be performed on all possible combinations of dense and sparse matrices with // fitting element types: \code typedef blaze::DynamicMatrix<double,blaze::rowMajor> DenseMatrixType; typedef blaze::CompressedMatrix<double,blaze::rowMajor> SparseMatrixType; DenseMatrixType D1, D2, D3; SparseMatrixType S1, S2; typedef blaze::CompressedVector<double,blaze::columnVector> SparseVectorType; SparseVectorType a, b; // ...
Resizing and initialization typedef blaze::Submatrix<DenseMatrixType> SubmatrixType; SubmatrixType sm = submatrix( D1, 0UL, 0UL, 8UL, 8UL ); // View on the 8x8 submatrix of matrix D1 // starting from row 0 and column 0 submatrix( D1, 0UL, 8UL, 8UL, 8UL ) = D2; // Dense matrix initialization of the 8x8 submatrix // starting in row 0 and column 8 sm = S1; // Sparse matrix initialization of the 8x8 submatrix sm D3 = sm + D2; // Dense matrix/dense matrix addition S2 = S1 - submatrix( D1, 8UL, 0UL, 8UL, 8UL ); // Sparse matrix/dense matrix subtraction D2 = sm * submatrix( D1, 8UL, 8UL, 8UL, 8UL ); // Dense matrix/dense matrix multiplication submatrix( D1, 8UL, 0UL, 8UL, 8UL ) *= 2.0; // In-place scaling of a submatrix of D1 D2 = submatrix( D1, 8UL, 8UL, 8UL, 8UL ) * 2.0; // Scaling of a submatrix of D1 D2 = 2.0 * sm; // Scaling of a submatrix of D1 submatrix( D1, 0UL, 8UL, 8UL, 8UL ) += D2; // Addition assignment submatrix( D1, 8UL, 0UL, 8UL, 8UL ) -= S1; // Subtraction assignment submatrix( D1, 8UL, 8UL, 8UL, 8UL ) *= sm; // Multiplication assignment a = submatrix( D1, 4UL, 4UL, 8UL, 8UL ) * b; // Dense matrix/sparse vector multiplication \endcode // \n \section views_aligned_submatrices Aligned Submatrices // <hr> // // Usually submatrices can be defined anywhere within a matrix. They may start at any position and // may have an arbitrary extension (only restricted by the extension of the underlying matrix). // However, in contrast to matrices themselves, which are always properly aligned in memory and // therefore can provide maximum performance, this means that submatrices in general have to be // considered to be unaligned. This can be made explicit by the blaze::unaligned flag: \code using blaze::unaligned; typedef blaze::DynamicMatrix<double,blaze::rowMajor> DenseMatrixType; DenseMatrixType A; // ... Resizing and initialization // Identical creations of an unaligned submatrix of size 8x8, starting in row 0 and column 0 blaze::Submatrix<DenseMatrixType> sm1 = submatrix ( A, 0UL, 0UL, 8UL, 8UL ); blaze::Submatrix<DenseMatrixType> sm2 = submatrix<unaligned>( A, 0UL, 0UL, 8UL, 8UL ); blaze::Submatrix<DenseMatrixType,unaligned> sm3 = submatrix ( A, 0UL, 0UL, 8UL, 8UL ); blaze::Submatrix<DenseMatrixType,unaligned> sm4 = submatrix<unaligned>( A, 0UL, 0UL, 8UL, 8UL ); \endcode // All of these calls to the \c submatrix() function are identical. Whether the alignment flag is // explicitly specified or not, it always returns an unaligned submatrix. Whereas this may provide // full flexibility in the creation of submatrices, this might result in performance disadvantages // in comparison to matrix primitives (even in case the specified submatrix could be aligned). // Whereas matrix primitives are guaranteed to be properly aligned and therefore provide maximum // performance in all operations, a general view on a matrix might not be properly aligned. This // may cause a performance penalty on some platforms and/or for some operations. // // However, it is also possible to create aligned submatrices. Aligned submatrices are identical to // unaligned submatrices in all aspects, except that they may pose additional alignment restrictions // and therefore have less flexibility during creation, but don't suffer from performance penalties // and provide the same performance as the underlying matrix.
Aligned submatrices are created by // explicitly specifying the blaze::aligned flag: \code using blaze::aligned; // Creating an aligned submatrix of size 8x8, starting in row 0 and column 0 blaze::Submatrix<DenseMatrixType,aligned> sm = submatrix<aligned>( A, 0UL, 0UL, 8UL, 8UL ); \endcode // The alignment restrictions refer to system dependent address restrictions for the used element // type and the available vectorization mode (SSE, AVX, ...). In order to be properly aligned the // first element of each row/column of the submatrix must be aligned. The following source code // gives some examples for a double precision row-major dynamic matrix, assuming that padding is // enabled and that AVX is available, which packs 4 \c double values into a SIMD vector: \code using blaze::aligned; using blaze::rowMajor; typedef blaze::DynamicMatrix<double,rowMajor> MatrixType; typedef blaze::Submatrix<MatrixType,aligned> SubmatrixType; MatrixType D( 13UL, 17UL ); // ... Resizing and initialization // OK: Starts at position (0,0), i.e. the first element of each row is aligned (due to padding) SubmatrixType dsm1 = submatrix<aligned>( D, 0UL, 0UL, 7UL, 11UL ); // OK: First column is a multiple of 4, i.e. the first element of each row is aligned (due to padding) SubmatrixType dsm2 = submatrix<aligned>( D, 3UL, 12UL, 8UL, 5UL ); // OK: First column is a multiple of 4 and the submatrix includes the last row and column SubmatrixType dsm3 = submatrix<aligned>( D, 4UL, 0UL, 9UL, 17UL ); // Error: First column is not a multiple of 4, i.e. the first element is not aligned SubmatrixType dsm4 = submatrix<aligned>( D, 2UL, 3UL, 11UL, 12UL ); \endcode // Note that the discussed alignment restrictions are only valid for aligned dense submatrices. // In contrast, aligned sparse submatrices at this time don't pose any additional restrictions. // Therefore aligned and unaligned sparse submatrices are truly fully identical. Still, in case // the blaze::aligned flag is specified during setup, an aligned submatrix is created: \code using blaze::aligned; typedef blaze::CompressedMatrix<double,blaze::rowMajor> SparseMatrixType; SparseMatrixType A; // ... Resizing and initialization // Creating an aligned submatrix of size 8x8, starting in row 0 and column 0 blaze::Submatrix<SparseMatrixType,aligned> sm = submatrix<aligned>( A, 0UL, 0UL, 8UL, 8UL ); \endcode // \n \section views_submatrices_on_submatrices Submatrices on Submatrices // <hr> // // It is also possible to create a submatrix view on another submatrix. In this context it is // important to remember that the type returned by the \c submatrix() function is the same type // as the type of the given submatrix, since the view on a submatrix is just another view on the // underlying matrix: \code typedef blaze::DynamicMatrix<double,blaze::rowMajor> MatrixType; typedef blaze::Submatrix<MatrixType> SubmatrixType; MatrixType D1; // ...
Resizing and initialization // Creating a submatrix view on the dense matrix D1 SubmatrixType sm1 = submatrix( D1, 4UL, 4UL, 8UL, 16UL ); // Creating a submatrix view on the dense submatrix sm1 SubmatrixType sm2 = submatrix( sm1, 1UL, 1UL, 4UL, 8UL ); \endcode // \n \section views_submatrices_on_symmetric_matrices Submatrices on Symmetric Matrices // // Submatrices can also be created on symmetric matrices (see the \c SymmetricMatrix class template): \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; using blaze::Submatrix; typedef SymmetricMatrix< DynamicMatrix<int> > SymmetricDynamicType; typedef Submatrix< SymmetricDynamicType > SubmatrixType; // Setup of a 16x16 symmetric matrix SymmetricDynamicType A( 16UL ); // Creating a dense submatrix of size 8x12, starting in row 2 and column 4 SubmatrixType sm = submatrix( A, 2UL, 4UL, 8UL, 12UL ); \endcode // It is important to note, however, that (compound) assignments to such submatrices have a // special restriction: The symmetry of the underlying symmetric matrix must not be broken! // Since the modification of element \f$ a_{ij} \f$ of a symmetric matrix also modifies the // element \f$ a_{ji} \f$, the matrix to be assigned must be structured such that the symmetry // of the symmetric matrix is preserved. Otherwise a \c std::invalid_argument exception is // thrown: \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; // Setup of two default 4x4 symmetric matrices SymmetricMatrix< DynamicMatrix<int> > A1( 4 ), A2( 4 ); // Setup of the 3x2 dynamic matrix // // ( 1 2 ) // B = ( 3 4 ) // ( 5 6 ) // DynamicMatrix<int> B{ { 1, 2 }, { 3, 4 }, { 5, 6 } }; // OK: Assigning B to a submatrix of A1 such that the symmetry can be preserved // // ( 0 0 1 2 ) // A1 = ( 0 0 3 4 ) // ( 1 3 5 6 ) // ( 2 4 6 0 ) // submatrix( A1, 0UL, 2UL, 3UL, 2UL ) = B; // OK // Error: Assigning B to a submatrix of A2 such that the symmetry cannot be preserved! // The elements marked with X cannot be assigned unambiguously! // // ( 0 1 2 0 ) // A2 = ( 1 3 X 0 ) // ( 2 X 6 0 ) // ( 0 0 0 0 ) // submatrix( A2, 0UL, 1UL, 3UL, 2UL ) = B; // Assignment throws an exception! \endcode // \n Previous: \ref views_subvectors &nbsp; &nbsp; Next: \ref views_rows */ //************************************************************************************************* //**Rows******************************************************************************************* /*!\page views_rows Rows // // \tableofcontents // // // Rows provide views on a specific row of a dense or sparse matrix. As such, rows act as a // reference to a specific row. This reference is valid and can be used in every way any other // row vector can be used as long as the matrix containing the row is not resized or entirely // destroyed. The row also acts as an alias to the row elements: Changes made to the elements // (e.g. modifying values, inserting or erasing elements) are immediately visible in the matrix // and changes made via the matrix are immediately visible in the row. // // // \n \section views_rows_class The Row Class Template // <hr> // // The blaze::Row class template represents a reference to a specific row of a dense or sparse // matrix primitive. It can be included via the header file \code #include <blaze/math/Row.h> \endcode // The type of the matrix is specified via template parameter: \code template< typename MT > class Row; \endcode // \c MT specifies the type of the matrix primitive. 
Row can be used with every matrix primitive, // but does not work with any matrix expression type. // // // \n \section views_rows_setup Setup of Rows // <hr> // // A reference to a dense or sparse row can be created very conveniently via the \c row() function. // This reference can be treated as any other row vector, i.e. it can be assigned to, it can be // copied from, and it can be used in arithmetic operations. The reference can also be used on // both sides of an assignment: The row can either be used as an alias to grant write access to a // specific row of a matrix primitive on the left-hand side of an assignment or to grant read-access // to a specific row of a matrix primitive or expression on the right-hand side of an assignment. // The following two examples demonstrate this for dense and sparse matrices: \code typedef blaze::DynamicVector<double,rowVector> DenseVectorType; typedef blaze::CompressedVector<double,rowVector> SparseVectorType; typedef blaze::DynamicMatrix<double,rowMajor> DenseMatrixType; typedef blaze::CompressedMatrix<double,rowMajor> SparseMatrixType; DenseVectorType x; SparseVectorType y; DenseMatrixType A, B; SparseMatrixType C, D; // ... Resizing and initialization // Setting the 2nd row of matrix A to x blaze::Row<DenseMatrixType> row2 = row( A, 2UL ); row2 = x; // Setting the 3rd row of matrix B to y row( B, 3UL ) = y; // Setting x to the 4th row of the result of the matrix multiplication x = row( A * B, 4UL ); // Setting y to the 2nd row of the result of the sparse matrix multiplication y = row( C * D, 2UL ); \endcode // The \c row() function can be used on any dense or sparse matrix, including expressions, as // illustrated by the source code example. However, rows cannot be instantiated for expression // types, but only for matrix primitives, i.e. for matrix types that offer write access. // // // \n \section views_rows_common_operations Common Operations // <hr> // // A row view can be used like any other row vector. For instance, the current number of elements // can be obtained via the \c size() function, the current capacity via the \c capacity() function, // and the number of non-zero elements via the \c nonZeros() function. However, since rows are // references to specific rows of a matrix, several operations are not possible on views, such // as resizing and swapping. The following example shows this by means of a dense row view: \code typedef blaze::DynamicMatrix<int,rowMajor> MatrixType; typedef blaze::Row<MatrixType> RowType; MatrixType A( 42UL, 42UL ); // ... Resizing and initialization // Creating a reference to the 2nd row of matrix A RowType row2 = row( A, 2UL ); row2.size(); // Returns the number of elements in the row row2.capacity(); // Returns the capacity of the row row2.nonZeros(); // Returns the number of non-zero elements contained in the row row2.resize( 84UL ); // Compilation error: Cannot resize a single row of a matrix RowType row3 = row( A, 3UL ); swap( row2, row3 ); // Compilation error: Swap operation not allowed \endcode // \n \section views_rows_element_access Element Access // <hr> // // The elements of the row can be directly accessed with the subscript operator. The numbering // of the row elements is \f[\left(\begin{array}{*{5}{c}} 0 & 1 & 2 & \cdots & N-1 \\ \end{array}\right),\f] // where N is the number of columns of the referenced matrix. Alternatively, the elements of // a row can be traversed via iterators.
Just as with vectors, in case of non-const rows, // \c begin() and \c end() return an Iterator, which allows a manipulation of the non-zero // value, in case of a constant row a ConstIterator is returned: \code typedef blaze::DynamicMatrix<int,rowMajor> MatrixType; typedef blaze::Row<MatrixType> RowType; MatrixType A( 128UL, 256UL ); // ... Resizing and initialization // Creating a reference to the 31st row of matrix A RowType row31 = row( A, 31UL ); for( RowType::Iterator it=row31.begin(); it!=row31.end(); ++it ) { *it = ...; // OK: Write access to the dense row value. ... = *it; // OK: Read access to the dense row value. } for( RowType::ConstIterator it=row31.begin(); it!=row31.end(); ++it ) { *it = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid. ... = *it; // OK: Read access to the dense row value. } \endcode \code typedef blaze::CompressedMatrix<int,rowMajor> MatrixType; typedef blaze::Row<MatrixType> RowType; MatrixType A( 128UL, 256UL ); // ... Resizing and initialization // Creating a reference to the 31st row of matrix A RowType row31 = row( A, 31UL ); for( RowType::Iterator it=row31.begin(); it!=row31.end(); ++it ) { it->value() = ...; // OK: Write access to the value of the non-zero element. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element. } for( RowType::ConstIterator it=row31.begin(); it!=row31.end(); ++it ) { it->value() = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element. } \endcode // \n \section views_rows_element_insertion Element Insertion // <hr> // // Inserting/accessing elements in a sparse row can be done by several alternative functions. // The following example demonstrates all options: \code typedef blaze::CompressedMatrix<double,blaze::rowMajor> MatrixType; MatrixType A( 10UL, 100UL ); // Non-initialized 10x100 matrix typedef blaze::Row<MatrixType> RowType; RowType row0( row( A, 0UL ) ); // Reference to the 0th row of A // The subscript operator provides access to all possible elements of the sparse row, // including the zero elements. In case the subscript operator is used to access an element // that is currently not stored in the sparse row, the element is inserted into the row. row0[42] = 2.0; // The second operation for inserting elements is the set() function. In case the element // is not contained in the row it is inserted into the row, if it is already contained in // the row its value is modified. row0.set( 45UL, -1.2 ); // An alternative for inserting elements into the row is the insert() function. However, // it inserts the element only in case the element is not already contained in the row. row0.insert( 50UL, 3.7 ); // A very efficient way to add new elements to a sparse row is the append() function. // Note that append() requires that the appended element's index is strictly larger than // the currently largest non-zero index of the row and that the row's capacity is large // enough to hold the new element.
row0.reserve( 10UL ); row0.append( 51UL, -2.1 ); \endcode // \n \section views_rows_arithmetic_operations Arithmetic Operations // <hr> // // Both dense and sparse rows can be used in all arithmetic operations that any other dense or // sparse row vector can be used in. The following example gives an impression of the use of // dense rows within arithmetic operations. All operations (addition, subtraction, multiplication, // scaling, ...) can be performed on all possible combinations of dense and sparse rows with // fitting element types: \code blaze::DynamicVector<double,blaze::rowVector> a( 2UL, 2.0 ), b; blaze::CompressedVector<double,blaze::rowVector> c( 2UL ); c[1] = 3.0; typedef blaze::DynamicMatrix<double,blaze::rowMajor> DenseMatrix; DenseMatrix A( 4UL, 2UL ); // Non-initialized 4x2 matrix typedef blaze::Row<DenseMatrix> RowType; RowType row0( row( A, 0UL ) ); // Reference to the 0th row of A row0[0] = 0.0; // Manual initialization of the 0th row of A row0[1] = 0.0; row( A, 1UL ) = 1.0; // Homogeneous initialization of the 1st row of A row( A, 2UL ) = a; // Dense vector initialization of the 2nd row of A row( A, 3UL ) = c; // Sparse vector initialization of the 3rd row of A b = row0 + a; // Dense vector/dense vector addition b = c + row( A, 1UL ); // Sparse vector/dense vector addition b = row0 * row( A, 2UL ); // Component-wise vector multiplication row( A, 1UL ) *= 2.0; // In-place scaling of the 1st row b = row( A, 1UL ) * 2.0; // Scaling of the 1st row b = 2.0 * row( A, 1UL ); // Scaling of the 1st row row( A, 2UL ) += a; // Addition assignment row( A, 2UL ) -= c; // Subtraction assignment row( A, 2UL ) *= row( A, 0UL ); // Multiplication assignment double scalar = row( A, 1UL ) * trans( c ); // Scalar/dot/inner product between two vectors A = trans( c ) * row( A, 1UL ); // Outer product between two vectors \endcode // \n \section views_rows_non_fitting_storage_order Views on Matrices with Non-Fitting Storage Order // <hr> // // Especially noteworthy is that row views can be created for both row-major and column-major // matrices. Whereas the interface of a row-major matrix only allows the direct traversal of rows // and the interface of a column-major matrix only allows the direct traversal of columns, via views // it is possible to traverse a row of a column-major matrix or a column of a row-major matrix. For // instance: \code typedef blaze::CompressedMatrix<int,columnMajor> MatrixType; typedef blaze::Row<MatrixType> RowType; MatrixType A( 64UL, 32UL ); // ... Resizing and initialization // Creating a reference to the 1st row of a column-major matrix A RowType row1 = row( A, 1UL ); for( RowType::Iterator it=row1.begin(); it!=row1.end(); ++it ) { // ... } \endcode // However, please note that creating a row view on a matrix stored in a column-major fashion // can result in a considerable performance decrease in comparison to a view on a matrix with // a fitting storage orientation. This is due to the non-contiguous storage of the matrix // elements. Therefore care has to be taken in the choice of the most suitable storage order: \code // Setup of two column-major matrices CompressedMatrix<double,columnMajor> A( 128UL, 128UL ); CompressedMatrix<double,columnMajor> B( 128UL, 128UL ); // ... Resizing and initialization // The computation of the 15th row of the multiplication between A and B ... CompressedVector<double,rowVector> x = row( A * B, 15UL ); // ... is essentially the same as the following computation, which multiplies // the 15th row of the column-major matrix A with B.
CompressedVector<double,rowVector> x = row( A, 15UL ) * B; \endcode // Although \b Blaze performs the resulting vector/matrix multiplication as efficiently as possible, // using a row-major storage order for matrix A would result in a more efficient evaluation. // // \n Previous: \ref views_submatrices &nbsp; &nbsp; Next: \ref views_columns */ //************************************************************************************************* //**Columns**************************************************************************************** /*!\page views_columns Columns // // \tableofcontents // // // Just as rows provide a view on a specific row of a matrix, columns provide views on a specific // column of a dense or sparse matrix. As such, columns act as a reference to a specific column. // This reference is valid and can be used in every way any other column vector can be used as long // as the matrix containing the column is not resized or entirely destroyed. Changes made to the // elements (e.g. modifying values, inserting or erasing elements) are immediately visible in the // matrix and changes made via the matrix are immediately visible in the column. // // // \n \section views_columns_class The Column Class Template // <hr> // // The blaze::Column class template represents a reference to a specific column of a dense or // sparse matrix primitive. It can be included via the header file \code #include <blaze/math/Column.h> \endcode // The type of the matrix is specified via template parameter: \code template< typename MT > class Column; \endcode // \c MT specifies the type of the matrix primitive. Column can be used with every matrix // primitive, but does not work with any matrix expression type. // // // \n \section views_colums_setup Setup of Columns // <hr> // // Similar to the setup of a row, a reference to a dense or sparse column can be created very // conveniently via the \c column() function. This reference can be treated as any other column // vector, i.e. it can be assigned to, copied from, and used in arithmetic operations. The // column can either be used as an alias to grant write access to a specific column of a matrix // primitive on the left-hand side of an assignment or to grant read-access to a specific column // of a matrix primitive or expression on the right-hand side of an assignment. The following // two examples demonstrate this for dense and sparse matrices: \code typedef blaze::DynamicVector<double,columnVector> DenseVectorType; typedef blaze::CompressedVector<double,columnVector> SparseVectorType; typedef blaze::DynamicMatrix<double,columnMajor> DenseMatrixType; typedef blaze::CompressedMatrix<double,columnMajor> SparseMatrixType; DenseVectorType x; SparseVectorType y; DenseMatrixType A, B; SparseMatrixType C, D; // ... Resizing and initialization // Setting the 1st column of matrix A to x blaze::Column<DenseMatrixType> col1 = column( A, 1UL ); col1 = x; // Setting the 4th column of matrix B to y column( B, 4UL ) = y; // Setting x to the 2nd column of the result of the matrix multiplication x = column( A * B, 2UL ); // Setting y to the 2nd column of the result of the sparse matrix multiplication y = column( C * D, 2UL ); \endcode // The \c column() function can be used on any dense or sparse matrix, including expressions, as // illustrated by the source code example. However, columns cannot be instantiated for expression // types, but only for matrix primitives, i.e. for matrix types that offer write access.
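// For instance, the difference between evaluating the column of an expression and trying to
// bind a view to it can be sketched as follows (an illustrative sketch only, based on the
// types from the example above; the name \c colX is hypothetical):

\code
x = column( A * B, 0UL );  // OK: The 0th column of the expression is evaluated into x

// Compilation error: A blaze::Column view cannot be bound to a matrix expression,
// since expression types do not offer write access
// blaze::Column<DenseMatrixType> colX = column( A * B, 0UL );
\endcode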
// // // \n \section views_columns_common_operations Common Operations // <hr> // // A column view can be used like any other column vector. For instance, the current number of // elements can be obtained via the \c size() function, the current capacity via the \c capacity() // function, and the number of non-zero elements via the \c nonZeros() function. However, since // columns are references to specific columns of a matrix, several operations are not possible on // views, such as resizing and swapping. The following example shows this by means of a dense // column view: \code typedef blaze::DynamicMatrix<int,columnMajor> MatrixType; typedef blaze::Column<MatrixType> ColumnType; MatrixType A( 42UL, 42UL ); // ... Resizing and initialization // Creating a reference to the 2nd column of matrix A ColumnType col2 = column( A, 2UL ); col2.size(); // Returns the number of elements in the column col2.capacity(); // Returns the capacity of the column col2.nonZeros(); // Returns the number of non-zero elements contained in the column col2.resize( 84UL ); // Compilation error: Cannot resize a single column of a matrix ColumnType col3 = column( A, 3UL ); swap( col2, col3 ); // Compilation error: Swap operation not allowed \endcode // \n \section views_columns_element_access Element Access // <hr> // // The elements of the column can be directly accessed with the subscript operator. The numbering // of the column elements is \f[\left(\begin{array}{*{5}{c}} 0 & 1 & 2 & \cdots & N-1 \\ \end{array}\right),\f] // where N is the number of rows of the referenced matrix. Alternatively, the elements of // a column can be traversed via iterators. Just as with vectors, in case of non-const columns, // \c begin() and \c end() return an Iterator, which allows a manipulation of the non-zero // value, in case of a constant column a ConstIterator is returned: \code typedef blaze::DynamicMatrix<int,columnMajor> MatrixType; typedef blaze::Column<MatrixType> ColumnType; MatrixType A( 128UL, 256UL ); // ... Resizing and initialization // Creating a reference to the 31st column of matrix A ColumnType col31 = column( A, 31UL ); for( ColumnType::Iterator it=col31.begin(); it!=col31.end(); ++it ) { *it = ...; // OK: Write access to the dense column value. ... = *it; // OK: Read access to the dense column value. } for( ColumnType::ConstIterator it=col31.begin(); it!=col31.end(); ++it ) { *it = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid. ... = *it; // OK: Read access to the dense column value. } \endcode \code typedef blaze::CompressedMatrix<int,columnMajor> MatrixType; typedef blaze::Column<MatrixType> ColumnType; MatrixType A( 128UL, 256UL ); // ... Resizing and initialization // Creating a reference to the 31st column of matrix A ColumnType col31 = column( A, 31UL ); for( ColumnType::Iterator it=col31.begin(); it!=col31.end(); ++it ) { it->value() = ...; // OK: Write access to the value of the non-zero element. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element. } for( ColumnType::ConstIterator it=col31.begin(); it!=col31.end(); ++it ) { it->value() = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ...
= it->index(); // OK: Read access to the index of the sparse element. } \endcode // \n \section views_columns_element_insertion Element Insertion // <hr> // // Inserting/accessing elements in a sparse column can be done by several alternative functions. // The following example demonstrates all options: \code typedef blaze::CompressedMatrix<double,blaze::columnMajor> MatrixType; MatrixType A( 100UL, 10UL ); // Non-initialized 100x10 matrix typedef blaze::Column<MatrixType> ColumnType; ColumnType col0( column( A, 0UL ) ); // Reference to the 0th column of A // The subscript operator provides access to all possible elements of the sparse column, // including the zero elements. In case the subscript operator is used to access an element // that is currently not stored in the sparse column, the element is inserted into the column. col0[42] = 2.0; // The second operation for inserting elements is the set() function. In case the element // is not contained in the column it is inserted into the column, if it is already contained // in the column its value is modified. col0.set( 45UL, -1.2 ); // An alternative for inserting elements into the column is the insert() function. However, // it inserts the element only in case the element is not already contained in the column. col0.insert( 50UL, 3.7 ); // A very efficient way to add new elements to a sparse column is the append() function. // Note that append() requires that the appended element's index is strictly larger than // the currently largest non-zero index of the column and that the column's capacity is // large enough to hold the new element. col0.reserve( 10UL ); col0.append( 51UL, -2.1 ); \endcode // \n \section views_columns_arithmetic_operations Arithmetic Operations // <hr> // // Both dense and sparse columns can be used in all arithmetic operations that any other dense or // sparse column vector can be used in. The following example gives an impression of the use of // dense columns within arithmetic operations. All operations (addition, subtraction, multiplication, // scaling, ...)
can be performed on all possible combinations of dense and sparse columns with // fitting element types: \code blaze::DynamicVector<double,blaze::columnVector> a( 2UL, 2.0 ), b; blaze::CompressedVector<double,blaze::columnVector> c( 2UL ); c[1] = 3.0; typedef blaze::DynamicMatrix<double,blaze::columnMajor> MatrixType; MatrixType A( 2UL, 4UL ); // Non-initialized 2x4 matrix typedef blaze::Column<MatrixType> ColumnType; ColumnType col0( column( A, 0UL ) ); // Reference to the 0th column of A col0[0] = 0.0; // Manual initialization of the 0th column of A col0[1] = 0.0; column( A, 1UL ) = 1.0; // Homogeneous initialization of the 1st column of A column( A, 2UL ) = a; // Dense vector initialization of the 2nd column of A column( A, 3UL ) = c; // Sparse vector initialization of the 3rd column of A b = col0 + a; // Dense vector/dense vector addition b = c + column( A, 1UL ); // Sparse vector/dense vector addition b = col0 * column( A, 2UL ); // Component-wise vector multiplication column( A, 1UL ) *= 2.0; // In-place scaling of the 1st column b = column( A, 1UL ) * 2.0; // Scaling of the 1st column b = 2.0 * column( A, 1UL ); // Scaling of the 1st column column( A, 2UL ) += a; // Addition assignment column( A, 2UL ) -= c; // Subtraction assignment column( A, 2UL ) *= column( A, 0UL ); // Multiplication assignment double scalar = trans( c ) * column( A, 1UL ); // Scalar/dot/inner product between two vectors A = column( A, 1UL ) * trans( c ); // Outer product between two vectors \endcode // \n \section views_columns_non_fitting_storage_order Views on Matrices with Non-Fitting Storage Order // <hr> // // Especially noteworthy is that column views can be created for both row-major and column-major // matrices. Whereas the interface of a row-major matrix only allows the direct traversal of rows // and the interface of a column-major matrix only allows the direct traversal of columns, via views // it is possible to traverse a row of a column-major matrix or a column of a row-major matrix. For // instance: \code typedef blaze::CompressedMatrix<int,rowMajor> MatrixType; typedef blaze::Column<MatrixType> ColumnType; MatrixType A( 64UL, 32UL ); // ... Resizing and initialization // Creating a reference to the 1st column of a row-major matrix A ColumnType col1 = column( A, 1UL ); for( ColumnType::Iterator it=col1.begin(); it!=col1.end(); ++it ) { // ... } \endcode // However, please note that creating a column view on a matrix stored in a row-major fashion // can result in a considerable performance decrease in comparison to a view on a matrix with // a fitting storage orientation. This is due to the non-contiguous storage of the matrix // elements. Therefore care has to be taken in the choice of the most suitable storage order: \code // Setup of two row-major matrices CompressedMatrix<double,rowMajor> A( 128UL, 128UL ); CompressedMatrix<double,rowMajor> B( 128UL, 128UL ); // ... Resizing and initialization // The computation of the 15th column of the multiplication between A and B ... CompressedVector<double,columnVector> x = column( A * B, 15UL ); // ... is essentially the same as the following computation, which multiplies // the 15th column of the row-major matrix B with A. CompressedVector<double,columnVector> x = A * column( B, 15UL ); \endcode // Although \b Blaze performs the resulting matrix/vector multiplication as efficiently as possible, // using a column-major storage order for matrix B would result in a more efficient evaluation.
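// For instance, if the columns of \c B are accessed repeatedly, storing the data in a
// column-major matrix makes the column traversal contiguous. A minimal sketch, using a
// hypothetical column-major counterpart \c B2 of the matrix \c B from above:

\code
// Column-major storage fits the column-wise access pattern
CompressedMatrix<double,columnMajor> B2( 128UL, 128UL );
// ... Resizing and initialization

CompressedVector<double,columnVector> y = A * column( B2, 15UL );  // Contiguous traversal of the 15th column of B2
\endcode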
// // \n Previous: \ref views_rows &nbsp; &nbsp; Next: \ref arithmetic_operations */ //************************************************************************************************* //**Arithmetic Operations************************************************************************** /*!\page arithmetic_operations Arithmetic Operations // // \tableofcontents // // // \b Blaze provides the following arithmetic operations for vectors and matrices: // // <ul> // <li> \ref addition </li> // <li> \ref subtraction </li> // <li> \ref scalar_multiplication </li> // <li> \ref vector_vector_multiplication // <ul> // <li> \ref componentwise_multiplication </li> // <li> \ref inner_product </li> // <li> \ref outer_product </li> // <li> \ref cross_product </li> // </ul> // </li> // <li> \ref vector_vector_division </li> // <li> \ref matrix_vector_multiplication </li> // <li> \ref matrix_matrix_multiplication </li> // </ul> // // \n Previous: \ref views_columns &nbsp; &nbsp; Next: \ref addition */ //************************************************************************************************* //**Addition*************************************************************************************** /*!\page addition Addition // // The addition of vectors and matrices is as intuitive as the addition of scalar values. For both // the vector addition as well as the matrix addition the addition operator can be used. It even // enables the addition of dense and sparse vectors as well as the addition of dense and sparse // matrices: \code blaze::DynamicVector<int> v1( 5UL ), v3; blaze::CompressedVector<float> v2( 5UL ); // ... Initializing the vectors v3 = v1 + v2; // Addition of two column vectors of different data type \endcode \code blaze::DynamicMatrix<float,rowMajor> M1( 7UL, 3UL ); blaze::CompressedMatrix<size_t,columnMajor> M2( 7UL, 3UL ), M3; // ... Initializing the matrices M3 = M1 + M2; // Addition of a row-major and a column-major matrix of different data type \endcode // Note that it is necessary that both operands have exactly the same dimensions. Violating this // precondition results in an exception. Also note that in case of vectors it is only possible to // add vectors with the same transpose flag: \code blaze::DynamicVector<int,columnVector> v1( 5UL ); blaze::CompressedVector<float,rowVector> v2( 5UL ); v1 + v2; // Compilation error: Cannot add a column vector and a row vector v1 + trans( v2 ); // OK: Addition of two column vectors \endcode // In case of matrices, however, it is possible to add row-major and column-major matrices. Note // however that in favor of performance the addition of two matrices with the same storage order // is favorable. The same argument holds for the element type: In case two vectors or matrices // with the same element type are added, the performance can be much higher due to vectorization // of the operation. \code blaze::DynamicVector<double> v1( 100UL ), v2( 100UL ), v3; // ... Initialization of the vectors v3 = v1 + v2; // Vectorized addition of two double precision vectors \endcode \code blaze::DynamicMatrix<float> M1( 50UL, 70UL ), M2( 50UL, 70UL ), M3; // ...
Initialization of the matrices M3 = M1 + M2; // Vectorized addition of two row-major, single precision dense matrices \endcode // \n Previous: \ref arithmetic_operations &nbsp; &nbsp; Next: \ref subtraction */ //************************************************************************************************* //**Subtraction************************************************************************************ /*!\page subtraction Subtraction // // The subtraction of vectors and matrices works just as intuitively as the addition, but with // the subtraction operator. For both the vector subtraction as well as the matrix subtraction // the subtraction operator can be used. It also enables the subtraction of dense and sparse // vectors as well as the subtraction of dense and sparse matrices: \code blaze::DynamicVector<int> v1( 5UL ), v3; blaze::CompressedVector<float> v2( 5UL ); // ... Initializing the vectors v3 = v1 - v2; // Subtraction of two column vectors of different data type blaze::DynamicMatrix<float,rowMajor> M1( 7UL, 3UL ); blaze::CompressedMatrix<size_t,columnMajor> M2( 7UL, 3UL ), M3; // ... Initializing the matrices M3 = M1 - M2; // Subtraction of a row-major and a column-major matrix of different data type \endcode // Note that it is necessary that both operands have exactly the same dimensions. Violating this // precondition results in an exception. Also note that in case of vectors it is only possible to // subtract vectors with the same transpose flag: \code blaze::DynamicVector<int,columnVector> v1( 5UL ); blaze::CompressedVector<float,rowVector> v2( 5UL ); v1 - v2; // Compilation error: Cannot subtract a row vector from a column vector v1 - trans( v2 ); // OK: Subtraction of two column vectors \endcode // In case of matrices, however, it is possible to subtract row-major and column-major matrices. // Note however that in favor of performance the subtraction of two matrices with the same storage // order is favorable. The same argument holds for the element type: In case two vectors or matrices // with the same element type are subtracted, the performance can be much higher due to vectorization // of the operation. \code blaze::DynamicVector<double> v1( 100UL ), v2( 100UL ), v3; // ... Initialization of the vectors v3 = v1 - v2; // Vectorized subtraction of two double precision vectors blaze::DynamicMatrix<float> M1( 50UL, 70UL ), M2( 50UL, 70UL ), M3; // ... Initialization of the matrices M3 = M1 - M2; // Vectorized subtraction of two row-major, single precision dense matrices \endcode // \n Previous: \ref addition &nbsp; &nbsp; Next: \ref scalar_multiplication */ //************************************************************************************************* //**Scalar Multiplication************************************************************************** /*!\page scalar_multiplication Scalar Multiplication // // The scalar multiplication is the multiplication of a scalar value with a vector or a matrix. // In \b Blaze it is possible to use all built-in/fundamental data types except bool as scalar // values. Additionally, it is possible to use std::complex values with the same built-in data // types as element type.
\code blaze::StaticVector<int,3UL> v1{ 1, 2, 3 }; blaze::DynamicVector<double> v2 = v1 * 1.2; blaze::CompressedVector<float> v3 = -0.3F * v1; \endcode \code blaze::StaticMatrix<int,3UL,2UL> M1{ { 1, 2 }, { 3, 4 }, { 5, 6 } }; blaze::DynamicMatrix<double> M2 = M1 * 1.2; blaze::CompressedMatrix<float> M3 = -0.3F * M1; \endcode // Vectors and matrices cannot be used as scalar values for scalar multiplications (see the // following example). However, each vector and matrix provides the \c scale() function, which // can be used to scale a vector or matrix element-wise with arbitrary scalar data types: \code blaze::CompressedMatrix< blaze::StaticMatrix<int,3UL,3UL> > M1; blaze::StaticMatrix<int,3UL,3UL> scalar; M1 * scalar; // No scalar multiplication, but matrix/matrix multiplication M1.scale( scalar ); // Scalar multiplication \endcode // \n Previous: \ref subtraction &nbsp; &nbsp; Next: \ref componentwise_multiplication */ //************************************************************************************************* //**Vector/Vector Multiplication******************************************************************* /*!\page vector_vector_multiplication Vector/Vector Multiplication // // \n \section componentwise_multiplication Componentwise Multiplication // <hr> // // Multiplying two vectors with the same transpose flag (i.e. either blaze::columnVector or // blaze::rowVector) via the multiplication operator results in a componentwise multiplication // of the two vectors: \code using blaze::DynamicVector; using blaze::CompressedVector; using blaze::StaticVector; CompressedVector<int,columnVector> v1( 17UL ); DynamicVector<int,columnVector> v2( 17UL ); StaticVector<double,10UL,rowVector> v3; DynamicVector<double,rowVector> v4( 10UL ); // ... Initialization of the vectors CompressedVector<int,columnVector> v5( v1 * v2 ); // Componentwise multiplication of a sparse and // a dense column vector. The result is a sparse // column vector. DynamicVector<double,rowVector> v6( v3 * v4 ); // Componentwise multiplication of two dense row // vectors. The result is a dense row vector. \endcode // \n \section inner_product Inner Product / Scalar Product / Dot Product // <hr> // // The multiplication between a row vector and a column vector results in an inner product between // the two vectors: \code blaze::StaticVector<int,3UL,rowVector> v1{ 2, 5, -1 }; blaze::DynamicVector<int,columnVector> v2{ -1, 3, -2 }; int result = v1 * v2; // Results in the value 15 \endcode // The \c trans() function can be used to transpose a vector as necessary: \code blaze::StaticVector<int,3UL,rowVector> v1{ 2, 5, -1 }; blaze::StaticVector<int,3UL,rowVector> v2{ -1, 3, -2 }; int result = v1 * trans( v2 ); // Also results in the value 15 \endcode // Alternatively, either the \c inner() function, the \c dot() function or the comma operator can // be used for any combination of vectors (row or column vectors) to perform an inner product: \code blaze::StaticVector<int,3UL,columnVector> v1{ 2, 5, -1 }; blaze::StaticVector<int,3UL,rowVector> v2{ -1, 3, -2 }; // All alternatives for the inner product between a column vector and a row vector int result1 = trans( v1 ) * trans( v2 ); int result2 = inner( v1, v2 ); int result3 = dot( v1, v2 ); int result4 = (v1,v2); \endcode // When using the comma operator, please note the brackets embracing the inner product expression. // Due to the low precedence of the comma operator (lower even than the assignment operator) these // brackets are strictly required for a correct evaluation of the inner product.
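// To make the precedence issue concrete, the following sketch (purely illustrative, reusing the
// column vector \c v1 from above) contrasts a correctly bracketed inner product with a call that
// misparses:

\code
const double len = std::sqrt( (v1,v1) );   // OK: Inner product of v1 with itself, then the square root
// const double len = std::sqrt( v1,v1 ); // Compilation error: The comma is parsed as an
//                                        // argument separator, not as the inner product
\endcode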
// // // \n \section outer_product Outer Product // <hr> // // The multiplication between a column vector and a row vector results in the outer product of // the two vectors: \code blaze::StaticVector<int,3UL,columnVector> v1{ 2, 5, -1 }; blaze::DynamicVector<int,rowVector> v2{ -1, 3, -2 }; StaticMatrix<int,3UL,3UL> M1 = v1 * v2; \endcode // The \c trans() function can be used to transpose a vector as necessary: \code blaze::StaticVector<int,3UL,rowVector> v1{ 2, 5, -1 }; blaze::StaticVector<int,3UL,rowVector> v2{ -1, 3, -2 }; StaticMatrix<int,3UL,3UL> M1 = trans( v1 ) * v2; \endcode // Alternatively, the \c outer() function can be used for any combination of vectors (row or column // vectors) to perform an outer product: \code blaze::StaticVector<int,3UL,rowVector> v1{ 2, 5, -1 }; blaze::StaticVector<int,3UL,rowVector> v2{ -1, 3, -2 }; StaticMatrix<int,3UL,3UL> M1 = outer( v1, v2 ); // Outer product between two row vectors \endcode // \n \section cross_product Cross Product // <hr> // // Two vectors with the same transpose flag can be multiplied via the cross product. The cross // product between two vectors \f$ a \f$ and \f$ b \f$ is defined as \f[ \left(\begin{array}{*{1}{c}} c_0 \\ c_1 \\ c_2 \\ \end{array}\right) = \left(\begin{array}{*{1}{c}} a_1 b_2 - a_2 b_1 \\ a_2 b_0 - a_0 b_2 \\ a_0 b_1 - a_1 b_0 \\ \end{array}\right). \f] // Due to the absence of a \f$ \times \f$ operator in the C++ language, the cross product is // realized via the \c cross() function. Alternatively, the modulo operator (i.e. \c operator%) // can be used in case infix notation is required: \code blaze::StaticVector<int,3UL,columnVector> v1{ 2, 5, -1 }; blaze::DynamicVector<int,columnVector> v2{ -1, 3, -2 }; blaze::StaticVector<int,3UL,columnVector> v3( cross( v1, v2 ) ); blaze::StaticVector<int,3UL,columnVector> v4( v1 % v2 ); \endcode // Please note that the cross product is restricted to three dimensional (dense and sparse) // column vectors. // // \n Previous: \ref scalar_multiplication &nbsp; &nbsp; Next: \ref vector_vector_division */ //************************************************************************************************* //**Vector/Vector Division************************************************************************* /*!\page vector_vector_division Vector/Vector Division // // \n \section componentwise_division Componentwise Division // <hr> // // Dividing a vector by a dense vector with the same transpose flag (i.e. either blaze::columnVector // or blaze::rowVector) via the division operator results in a componentwise division: \code using blaze::DynamicVector; using blaze::CompressedVector; using blaze::StaticVector; CompressedVector<int,columnVector> v1( 17UL ); DynamicVector<int,columnVector> v2( 17UL ); StaticVector<double,10UL,rowVector> v3; DynamicVector<double,rowVector> v4( 10UL ); // ... Initialization of the vectors CompressedVector<int,columnVector> v5( v1 / v2 ); // Componentwise division of a sparse and a // dense column vector. The result is a sparse // column vector. DynamicVector<double,rowVector> v6( v3 / v4 ); // Componentwise division of two dense row // vectors. The result is a dense row vector. \endcode // Note that all values of the divisor must be non-zero and that no checks are performed to assert // this precondition!
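// If the divisor is not known to be free of zero elements, the caller has to verify this
// precondition manually. A minimal sketch of such a guard (not part of \b Blaze, purely
// illustrative) could look as follows:

\code
blaze::DynamicVector<double> num( 10UL, 1.0 ), den( 10UL, 2.0 ), res;

bool divisible( true );
for( size_t i=0UL; i<den.size(); ++i ) {
   if( den[i] == 0.0 ) { divisible = false; break; }  // A zero element would invalidate the division
}

if( divisible ) {
   res = num / den;  // Componentwise division is well defined
}
\endcode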
//
// \n Previous: \ref vector_vector_multiplication &nbsp; &nbsp; Next: \ref matrix_vector_multiplication
*/
//*************************************************************************************************


//**Matrix/Vector Multiplication*******************************************************************
/*!\page matrix_vector_multiplication Matrix/Vector Multiplication
//
// In \b Blaze matrix/vector multiplications can be formulated as intuitively as in mathematical
// textbooks. Just as in textbooks there are two different multiplications between a matrix and
// a vector: a matrix/column vector multiplication and a row vector/matrix multiplication:

\code
using blaze::StaticVector;
using blaze::DynamicVector;
using blaze::DynamicMatrix;

DynamicMatrix<int> M1( 39UL, 12UL );
StaticVector<int,12UL,columnVector> v1;

// ... Initialization of the matrix and the vector

DynamicVector<int,columnVector> v2 = M1 * v1;           // Matrix/column vector multiplication
DynamicVector<int,rowVector>    v3 = trans( v1 ) * M1;  // Row vector/matrix multiplication
\endcode

// Note that the storage order of the matrix poses no restrictions on the operation. Also note
// that the highest performance for a multiplication between a dense matrix and a dense vector can
// be achieved if both the matrix and the vector have the same scalar element type.
//
// \n Previous: \ref vector_vector_division &nbsp; &nbsp; Next: \ref matrix_matrix_multiplication
*/
//*************************************************************************************************


//**Matrix/Matrix Multiplication*******************************************************************
/*!\page matrix_matrix_multiplication Matrix/Matrix Multiplication
//
// The matrix/matrix multiplication can be formulated exactly as in mathematical textbooks:

\code
using blaze::DynamicMatrix;
using blaze::CompressedMatrix;

DynamicMatrix<double>   M1( 45UL, 85UL );
CompressedMatrix<float> M2( 85UL, 37UL );

// ... Initialization of the matrices

DynamicMatrix<double> M3 = M1 * M2;
\endcode

// The storage order of the two matrices poses no restrictions on the operation; all variations
// are possible. Note however that the highest performance for a multiplication between two dense
// matrices can be expected for two matrices with the same scalar element type.
//
// \n Previous: \ref matrix_vector_multiplication &nbsp; &nbsp; Next: \ref custom_operations
*/
//*************************************************************************************************


//**Custom Operations******************************************************************************
/*!\page custom_operations Custom Operations
//
// In addition to the provided operations on vectors and matrices it is possible to define custom
// operations. For this purpose, \b Blaze provides the \c forEach() function, which allows one to
// pass the required operation via a functor or lambda:

\code
blaze::DynamicMatrix<double> A, B;

B = forEach( A, []( double d ){ return std::sqrt( d ); } );
\endcode

// This example demonstrates the most convenient way of defining a custom operation by passing a
// lambda to the \c forEach() function. The lambda is executed on every element of a dense
// vector or matrix and on every non-zero element of a sparse vector or matrix.
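// The same mechanism works for vectors. For instance, the following sketch computes the
// componentwise square root of a dense vector:

\code
blaze::DynamicVector<double> a{ 1.0, 4.0, 9.0 };

blaze::DynamicVector<double> b = forEach( a, []( double d ){ return std::sqrt( d ); } );  // ( 1, 2, 3 )
\endcode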
//
// Alternatively, it is possible to pass a custom functor:

\code
struct Sqrt
{
   double operator()( double a ) const
   {
      return std::sqrt( a );
   }
};

B = forEach( A, Sqrt() );
\endcode

// In order for the functor to work in a call to \c forEach() it must define a function call
// operator, which accepts arguments of the type of the corresponding vector or matrix elements.
//
// Although the operation is automatically parallelized depending on the size of the vector or
// matrix, no automatic vectorization is possible. In order to enable vectorization, a \c load()
// function can be added to the functor, which handles the vectorized computation. Depending on
// the data type this function is passed one of the following \b Blaze SIMD data types:
//
// <ul>
//    <li>SIMD data types for fundamental data types
//       <ul>
//          <li>\c blaze::SIMDint8: Packed SIMD type for 8-bit signed integral data types</li>
//          <li>\c blaze::SIMDuint8: Packed SIMD type for 8-bit unsigned integral data types</li>
//          <li>\c blaze::SIMDint16: Packed SIMD type for 16-bit signed integral data types</li>
//          <li>\c blaze::SIMDuint16: Packed SIMD type for 16-bit unsigned integral data types</li>
//          <li>\c blaze::SIMDint32: Packed SIMD type for 32-bit signed integral data types</li>
//          <li>\c blaze::SIMDuint32: Packed SIMD type for 32-bit unsigned integral data types</li>
//          <li>\c blaze::SIMDint64: Packed SIMD type for 64-bit signed integral data types</li>
//          <li>\c blaze::SIMDuint64: Packed SIMD type for 64-bit unsigned integral data types</li>
//          <li>\c blaze::SIMDfloat: Packed SIMD type for single precision floating point data</li>
//          <li>\c blaze::SIMDdouble: Packed SIMD type for double precision floating point data</li>
//       </ul>
//    </li>
//    <li>SIMD data types for complex data types
//       <ul>
//          <li>\c blaze::cint8: Packed SIMD type for complex 8-bit signed integral data types</li>
//          <li>\c blaze::cuint8: Packed SIMD type for complex 8-bit unsigned integral data types</li>
//          <li>\c blaze::cint16: Packed SIMD type for complex 16-bit signed integral data types</li>
//          <li>\c blaze::cuint16: Packed SIMD type for complex 16-bit unsigned integral data types</li>
//          <li>\c blaze::cint32: Packed SIMD type for complex 32-bit signed integral data types</li>
//          <li>\c blaze::cuint32: Packed SIMD type for complex 32-bit unsigned integral data types</li>
//          <li>\c blaze::cint64: Packed SIMD type for complex 64-bit signed integral data types</li>
//          <li>\c blaze::cuint64: Packed SIMD type for complex 64-bit unsigned integral data types</li>
//          <li>\c blaze::cfloat: Packed SIMD type for complex single precision floating point data</li>
//          <li>\c blaze::cdouble: Packed SIMD type for complex double precision floating point data</li>
//       </ul>
//    </li>
// </ul>
//
// All SIMD types provide the \c value data member for direct access to the underlying intrinsic
// data element. In the following example, this intrinsic element is passed to the AVX function
// \c _mm256_sqrt_pd():

\code
struct Sqrt
{
   double operator()( double a ) const
   {
      return std::sqrt( a );
   }

   blaze::SIMDdouble load( blaze::SIMDdouble a ) const
   {
      return _mm256_sqrt_pd( a.value );
   }
};
\endcode

// In this example, whenever vectorization is generally applicable, the \c load() function is
// called instead of the function call operator for as long as the number of remaining elements
// is larger than or equal to the width of the packed SIMD type. In all other cases (which also
// includes peel-off and remainder loops) the scalar operation is used.
//
// Please note that this example has two drawbacks: First, it will only compile in case the
// intrinsic \c _mm256_sqrt_pd() function is available (i.e. when AVX is active). Second, the
// availability of AVX is not taken into account. The first drawback can be alleviated by making
// the \c load() function a function template. The second drawback can be dealt with by adding a
// \c simdEnabled() function template to the functor:

\code
struct Sqrt
{
   double operator()( double a ) const
   {
      return std::sqrt( a );
   }

   template< typename T >
   T load( T a ) const
   {
      return _mm256_sqrt_pd( a.value );
   }

   template< typename T >
   static constexpr bool simdEnabled()
   {
#if defined(__AVX__)
      return true;
#else
      return false;
#endif
   }
};
\endcode

// The \c simdEnabled() function must be a \c static, \c constexpr function and must return whether
// or not vectorization is available for the given data type \c T. In case the function returns
// \c true, the \c load() function is used for a vectorized evaluation; in case the function
// returns \c false, \c load() is not called.
//
// Note that this is a simplified example that only works for dense vectors and matrices with
// double precision floating point elements. The following code shows the complete implementation
// of the corresponding functor that is used within the \b Blaze library. The \b Blaze \c Sqrt
// functor works for all data types that provide a square root operation:

\code
namespace blaze {

struct Sqrt
{
   template< typename T >
   BLAZE_ALWAYS_INLINE auto operator()( const T& a ) const
   {
      return sqrt( a );
   }

   template< typename T >
   static constexpr bool simdEnabled() { return HasSIMDSqrt<T>::value; }

   template< typename T >
   BLAZE_ALWAYS_INLINE auto load( const T& a ) const
   {
      BLAZE_CONSTRAINT_MUST_BE_SIMD_TYPE( T );
      return sqrt( a );
   }
};

} // namespace blaze
\endcode

// For more information on the available \b Blaze SIMD data types and functions, please see the
// SIMD module in the complete \b Blaze documentation.
//
// \n Previous: \ref matrix_matrix_multiplication &nbsp; &nbsp; Next: \ref shared_memory_parallelization
*/
//*************************************************************************************************


//**Shared Memory Parallelization******************************************************************
/*!\page shared_memory_parallelization Shared Memory Parallelization
//
// One of the main motivations of the \b Blaze 1.x releases was to achieve maximum performance
// on a single CPU core for all possible operations. However, today's CPUs are not single core
// anymore, but provide several (homogeneous or heterogeneous) compute cores. In order to fully
// exploit the performance potential of a multicore CPU, computations have to be parallelized
// across all available cores of a CPU.
For this purpose, \b Blaze provides three different
// shared memory parallelization techniques:
//
//  - \ref openmp_parallelization
//  - \ref cpp_threads_parallelization
//  - \ref boost_threads_parallelization
//
// In addition, \b Blaze provides means to enforce the serial execution of specific operations:
//
//  - \ref serial_execution
//
// \n Previous: \ref custom_operations &nbsp; &nbsp; Next: \ref openmp_parallelization
*/
//*************************************************************************************************


//**OpenMP Parallelization*************************************************************************
/*!\page openmp_parallelization OpenMP Parallelization
//
// \tableofcontents
//
//
// \n \section openmp_setup OpenMP Setup
// <hr>
//
// To enable the OpenMP-based parallelization, all that needs to be done is to explicitly specify
// the use of OpenMP on the command line:

\code
-fopenmp   // GNU C++ compiler
-openmp    // Intel C++ compiler
/openmp    // Visual Studio
\endcode

// This simple action will cause the \b Blaze library to automatically try to run all operations
// in parallel with the specified number of threads.
//
// As common for OpenMP, the number of threads can be specified either via an environment variable

\code
export OMP_NUM_THREADS=4  // Unix systems
set OMP_NUM_THREADS=4     // Windows systems
\endcode

// or via an explicit call to the \c omp_set_num_threads() function:

\code
omp_set_num_threads( 4 );
\endcode

// Alternatively, the number of threads can also be specified via the \c setNumThreads() function
// provided by the \b Blaze library:

\code
blaze::setNumThreads( 4 );
\endcode

// Please note that the \b Blaze library does not limit the available number of threads. Therefore
// it is YOUR responsibility to choose an appropriate number of threads. The best performance,
// though, can be expected if the specified number of threads matches the available number of
// cores.
//
// In order to query the number of threads used for the parallelization of operations, the
// \c getNumThreads() function can be used:

\code
const size_t threads = blaze::getNumThreads();
\endcode

// In the context of OpenMP, the function returns the maximum number of threads OpenMP will use
// within a parallel region and is therefore equivalent to the \c omp_get_max_threads() function.
//
//
// \n \section openmp_configuration OpenMP Configuration
// <hr>
//
// Note that \b Blaze does not unconditionally run an operation in parallel. In case \b Blaze
// deems the parallel execution counterproductive for the overall performance, the operation
// is executed serially. One of the main reasons for not executing an operation in parallel is
// the size of the operands. For instance, a vector addition is only executed in parallel if the
// size of both vector operands exceeds a certain threshold. Otherwise, the performance could
// seriously decrease due to the overhead caused by the thread setup. However, in order to be
// able to adjust the \b Blaze library to a specific system, it is possible to configure these
// thresholds manually. All shared memory thresholds are contained within the configuration file
// <tt>./blaze/config/Thresholds.h</tt>.
//
// Please note that these thresholds are highly sensitive to the used system architecture and
// the shared memory parallelization technique (see also \ref cpp_threads_parallelization and
// \ref boost_threads_parallelization).
// Therefore the default values cannot guarantee maximum
// performance for all possible situations and configurations. They merely provide a reasonable
// standard for the current CPU generation.
//
//
// \n \section openmp_first_touch First Touch Policy
// <hr>
//
// So far the \b Blaze library does not (yet) automatically initialize dynamic memory according
// to the first touch principle. Consider for instance the following vector triad example:

\code
using blaze::columnVector;

const size_t N( 1000000UL );

blaze::DynamicVector<double,columnVector> a( N ), b( N ), c( N ), d( N );

// Initialization of the vectors b, c, and d
for( size_t i=0UL; i<N; ++i ) {
   b[i] = rand<double>();
   c[i] = rand<double>();
   d[i] = rand<double>();
}

// Performing a vector triad
a = b + c * d;
\endcode

// If this code, which is prototypical for many OpenMP applications that have not been optimized
// for ccNUMA architectures, is run across several locality domains (LD), it will not scale
// beyond the maximum performance achievable on a single LD if the working set does not fit into
// the cache. This is because the initialization loop is executed by a single thread, writing to
// \c b, \c c, and \c d for the first time. Hence, all memory pages belonging to those arrays will
// be mapped into a single LD.
//
// As mentioned above, this problem can be solved by performing vector initialization in parallel:

\code
// ...

// Initialization of the vectors b, c, and d
#pragma omp parallel for
for( size_t i=0UL; i<N; ++i ) {
   b[i] = rand<double>();
   c[i] = rand<double>();
   d[i] = rand<double>();
}

// ...
\endcode

// This simple modification makes a huge difference on ccNUMA in memory-bound situations (as for
// instance in all BLAS level 1 operations and partially BLAS level 2 operations). Therefore, in
// order to achieve the maximum possible performance, it is imperative to initialize the memory
// according to the later use of the data structures.
//
//
// \n \section openmp_limitations Limitations of the OpenMP Parallelization
// <hr>
//
// There are a few important limitations to the current \b Blaze OpenMP parallelization. The first
// one involves the explicit use of an OpenMP parallel region (see \ref openmp_parallel), the
// other one the OpenMP \c sections directive (see \ref openmp_sections).
//
//
// \n \subsection openmp_parallel The Parallel Directive
//
// In OpenMP threads are explicitly spawned via an OpenMP parallel directive:

\code
// Serial region, executed by a single thread

#pragma omp parallel
{
   // Parallel region, executed by the specified number of threads
}

// Serial region, executed by a single thread
\endcode

// Conceptually, the specified number of threads (see \ref openmp_setup) is created every time a
// parallel directive is encountered. Therefore, from a performance point of view, it seems to be
// beneficial to use a single OpenMP parallel directive for several operations:

\code
blaze::DynamicVector<double> x, y1, y2;
blaze::DynamicMatrix<double> A, B;

#pragma omp parallel
{
   y1 = A * x;
   y2 = B * x;
}
\endcode

// Unfortunately, this optimization approach is not allowed within the \b Blaze library. More
// explicitly, it is not allowed to put an operation into a parallel region. The reason is that
// the entire code contained within a parallel region is executed by all threads.
// Although this
// appears to just comprise the contained computations, a computation (or more specifically the
// assignment of an expression to a vector or matrix) can contain additional logic that must not
// be handled by multiple threads (as for instance memory allocations, setup of temporaries, etc.).
// Therefore it is not possible to manually start a parallel region for several operations, but
// \b Blaze will spawn threads automatically, depending on the specifics of the operation at hand
// and the given operands.
//
// \n \subsection openmp_sections The Sections Directive
//
// OpenMP provides several work-sharing constructs to distribute work among threads. One of these
// constructs is the \c sections directive:

\code
blaze::DynamicVector<double> x, y1, y2;
blaze::DynamicMatrix<double> A, B;

// ... Resizing and initialization

#pragma omp sections
{
#pragma omp section

   y1 = A * x;

#pragma omp section

   y2 = B * x;

}
\endcode

// In this example, two threads are used to compute two distinct matrix/vector multiplications
// concurrently. Thereby each of the \c sections is executed by exactly one thread.
//
// Unfortunately, \b Blaze does not support concurrent parallel computations and therefore this
// approach does not work with any of the \b Blaze parallelization techniques. All techniques
// (including the C++11 and Boost thread parallelizations; see \ref cpp_threads_parallelization
// and \ref boost_threads_parallelization) are optimized for the parallel computation of an
// operation within a single thread of execution. This means that \b Blaze tries to use all
// available threads to compute the result of a single operation as efficiently as possible.
// Therefore, for this special case, it is advisable to disable all \b Blaze parallelizations
// and to let \b Blaze compute all operations within a \c sections directive in serial. This can
// be done by either completely disabling the \b Blaze parallelization (see \ref serial_execution)
// or by selectively serializing all operations within a \c sections directive via the \c serial()
// function:

\code
blaze::DynamicVector<double> x, y1, y2;
blaze::DynamicMatrix<double> A, B;

// ... Resizing and initialization

#pragma omp sections
{
#pragma omp section

   y1 = serial( A * x );

#pragma omp section

   y2 = serial( B * x );

}
\endcode

// Please note that the use of the \c BLAZE_SERIAL_SECTION (see also \ref serial_execution) does
// NOT work in this context!
//
// \n Previous: \ref shared_memory_parallelization &nbsp; &nbsp; Next: \ref cpp_threads_parallelization
*/
//*************************************************************************************************


//**C++11 Thread Parallelization*******************************************************************
/*!\page cpp_threads_parallelization C++11 Thread Parallelization
//
// \tableofcontents
//
//
// In addition to the OpenMP-based shared memory parallelization, starting with \b Blaze 2.1,
// \b Blaze also provides a shared memory parallelization based on C++11 threads.
//
//
// \n \section cpp_threads_setup C++11 Thread Setup
// <hr>
//
// In order to enable the C++11 thread-based parallelization, first the corresponding C++11-specific
// compiler flags have to be used and second the \c BLAZE_USE_CPP_THREADS command line argument
// has to be explicitly specified. For instance, in case of the GNU C++ and Clang compilers the
// compiler flags have to be extended by

\code
... -std=c++11 -DBLAZE_USE_CPP_THREADS ...
\endcode

// This simple action will cause the \b Blaze library to automatically try to run all operations
// in parallel with the specified number of C++11 threads. Note that in case both OpenMP and C++11
// threads are enabled on the command line, the OpenMP-based parallelization has priority and
// is preferred.
//
// The number of threads can be specified either via the environment variable \c BLAZE_NUM_THREADS

\code
export BLAZE_NUM_THREADS=4  // Unix systems
set BLAZE_NUM_THREADS=4     // Windows systems
\endcode

// or alternatively via the \c setNumThreads() function provided by the \b Blaze library:

\code
blaze::setNumThreads( 4 );
\endcode

// Please note that the \b Blaze library does not limit the available number of threads. Therefore
// it is YOUR responsibility to choose an appropriate number of threads. The best performance,
// though, can be expected if the specified number of threads matches the available number of
// cores.
//
// In order to query the number of threads used for the parallelization of operations, the
// \c getNumThreads() function can be used:

\code
const size_t threads = blaze::getNumThreads();
\endcode

// In the context of C++11 threads, the function will return the previously specified number of
// threads.
//
//
// \n \section cpp_threads_configuration C++11 Thread Configuration
// <hr>
//
// As in the case of the OpenMP-based parallelization, \b Blaze does not unconditionally run an
// operation in parallel. In case \b Blaze deems the parallel execution counterproductive for
// the overall performance, the operation is executed serially. One of the main reasons for not
// executing an operation in parallel is the size of the operands. For instance, a vector addition
// is only executed in parallel if the size of both vector operands exceeds a certain threshold.
// Otherwise, the performance could seriously decrease due to the overhead caused by the thread
// setup. However, in order to be able to adjust the \b Blaze library to a specific system, it
// is possible to configure these thresholds manually. All thresholds are contained within the
// configuration file <tt>./blaze/config/Thresholds.h</tt>.
//
// Please note that these thresholds are highly sensitive to the used system architecture and
// the shared memory parallelization technique. Therefore the default values cannot guarantee
// maximum performance for all possible situations and configurations. They merely provide a
// reasonable standard for the current CPU generation. Also note that the provided defaults
// have been determined using the OpenMP parallelization and require individual adaptation for
// the C++11 thread parallelization.
//
//
// \n \section cpp_threads_known_issues Known Issues
// <hr>
//
// There is a known issue in Visual Studio 2012 and 2013 that may cause C++11 threads to hang
// if their destructor is executed after the \c main() function:
//
//    http://connect.microsoft.com/VisualStudio/feedback/details/747145
//
// Unfortunately, the C++11 parallelization of the \b Blaze library is affected by this bug.
// In order to circumvent this problem, \b Blaze provides the \c shutDownThreads() function,
// which can be used to manually destroy all threads at the end of the \c main() function:

\code
int main()
{
   // ... Using the C++11 thread parallelization of Blaze

   shutDownThreads();
}
\endcode

// Please note that this function may only be used at the end of the \c main() function. After
// this function no further computation may be executed!
// Also note that this function has an
// effect only for Visual Studio compilers and doesn't need to be used with any other compiler.
//
// \n Previous: \ref openmp_parallelization &nbsp; &nbsp; Next: \ref boost_threads_parallelization
*/
//*************************************************************************************************


//**Boost Thread Parallelization*******************************************************************
/*!\page boost_threads_parallelization Boost Thread Parallelization
//
// \tableofcontents
//
//
// The third available shared memory parallelization provided with \b Blaze is based on Boost
// threads.
//
//
// \n \section boost_threads_setup Boost Thread Setup
// <hr>
//
// In order to enable the Boost thread-based parallelization, two steps have to be taken: First,
// the \c BLAZE_USE_BOOST_THREADS command line argument has to be explicitly specified during
// compilation:

\code
... -DBLAZE_USE_BOOST_THREADS ...
\endcode

// Second, the corresponding Boost libraries have to be linked. These two simple actions will cause
// the \b Blaze library to automatically try to run all operations in parallel with the specified
// number of Boost threads. Note that the OpenMP-based and C++11 thread-based parallelizations
// have priority, i.e. are preferred in case either is enabled in combination with the Boost
// thread parallelization.
//
// The number of threads can be specified either via the environment variable \c BLAZE_NUM_THREADS

\code
export BLAZE_NUM_THREADS=4  // Unix systems
set BLAZE_NUM_THREADS=4     // Windows systems
\endcode

// or alternatively via the \c setNumThreads() function provided by the \b Blaze library:

\code
blaze::setNumThreads( 4 );
\endcode

// Please note that the \b Blaze library does not limit the available number of threads. Therefore
// it is YOUR responsibility to choose an appropriate number of threads. The best performance,
// though, can be expected if the specified number of threads matches the available number of
// cores.
//
// In order to query the number of threads used for the parallelization of operations, the
// \c getNumThreads() function can be used:

\code
const size_t threads = blaze::getNumThreads();
\endcode

// In the context of Boost threads, the function will return the previously specified number of
// threads.
//
//
// \n \section boost_threads_configuration Boost Thread Configuration
// <hr>
//
// As in the case of the other shared memory parallelizations, \b Blaze does not unconditionally
// run an operation in parallel (see \ref openmp_parallelization or \ref cpp_threads_parallelization).
// All thresholds related to the Boost thread parallelization are also contained within the
// configuration file <tt>./blaze/config/Thresholds.h</tt>.
//
// Please note that these thresholds are highly sensitive to the used system architecture and
// the shared memory parallelization technique. Therefore the default values cannot guarantee
// maximum performance for all possible situations and configurations. They merely provide a
// reasonable standard for the current CPU generation. Also note that the provided defaults
// have been determined using the OpenMP parallelization and require individual adaptation for
// the Boost thread parallelization.
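//
// For completeness, compiling and linking a Boost-thread-parallelized program might look as
// follows (a sketch only; the exact include path and Boost library names depend on the
// installation):

\code
g++ -O3 -DBLAZE_USE_BOOST_THREADS -I/path/to/blaze main.cpp -lboost_thread -lboost_system
\endcode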
//
// \n Previous: \ref cpp_threads_parallelization &nbsp; &nbsp; Next: \ref serial_execution
*/
//*************************************************************************************************


//**Serial Execution*******************************************************************************
/*!\page serial_execution Serial Execution
//
// Sometimes it may be necessary to enforce the serial execution of specific operations. For this
// purpose, the \b Blaze library offers three possible options: the serialization of a single
// expression via the \c serial() function, the serialization of a block of expressions via the
// \c BLAZE_SERIAL_SECTION, and the general deactivation of the parallel execution.
//
//
// \n \section serial_execution_serial_expression Option 1: Serialization of a Single Expression
// <hr>
//
// The first option is the serialization of a specific operation via the \c serial() function:

\code
blaze::DynamicMatrix<double> A, B, C;

// ... Resizing and initialization

C = serial( A + B );
\endcode

// \c serial() enforces the serial evaluation of the enclosed expression. It can be used on any
// kind of dense or sparse vector or matrix expression.
//
//
// \n \section serial_execution_serial_section Option 2: Serialization of Multiple Expressions
// <hr>
//
// The second option is the temporary and local enforcement of a serial execution via the
// \c BLAZE_SERIAL_SECTION:

\code
using blaze::rowMajor;
using blaze::columnVector;

blaze::DynamicMatrix<double,rowMajor> A;
blaze::DynamicVector<double,columnVector> b, c, d, x, y, z;

// ... Resizing and initialization

// Parallel execution
// If possible and beneficial for performance the following operation is executed in parallel.
x = A * b;

// Serial execution
// All operations executed within the serial section are guaranteed to be executed in
// serial (even if a parallel execution would be possible and/or beneficial).
BLAZE_SERIAL_SECTION
{
   y = A * c;
   z = A * d;
}

// Parallel execution continued
// ...
\endcode

// Within the scope of the \c BLAZE_SERIAL_SECTION, all operations are guaranteed to run in serial.
// Outside the scope of the serial section, all operations are run in parallel (if beneficial for
// the performance).
//
// Note that the \c BLAZE_SERIAL_SECTION must only be used within a single thread of execution.
// The use of the serial section within several concurrent threads will result in undefined
// behavior!
//
//
// \n \section serial_execution_deactivate_parallelism Option 3: Deactivation of Parallel Execution
// <hr>
//
// The third option is the general deactivation of the parallel execution (even in case OpenMP is
// enabled on the command line). This can be achieved via the \c BLAZE_USE_SHARED_MEMORY_PARALLELIZATION
// switch in the <tt>./blaze/config/SMP.h</tt> configuration file:

\code
#define BLAZE_USE_SHARED_MEMORY_PARALLELIZATION 1
\endcode

// In case the \c BLAZE_USE_SHARED_MEMORY_PARALLELIZATION switch is set to 0, the shared memory
// parallelization is deactivated altogether.
//
// \n Previous: \ref boost_threads_parallelization &nbsp; &nbsp; Next: \ref serialization
*/
//*************************************************************************************************


//**Serialization**********************************************************************************
/*!\page serialization Serialization
//
// Sometimes it is necessary to store vectors and/or matrices on disk, for instance for storing
// results or for sharing specific setups with other people.
// The \b Blaze math serialization
// module provides the necessary functionality to create platform-independent, portable, binary
// representations of vectors and matrices that can be used to store the \b Blaze data structures
// without loss of precision and to reliably transfer them from one machine to another.
//
// The following two pages explain how to serialize vectors and matrices:
//
//  - \ref vector_serialization
//  - \ref matrix_serialization
//
// \n Previous: \ref serial_execution &nbsp; &nbsp; Next: \ref vector_serialization
*/
//*************************************************************************************************


//**Vector Serialization***************************************************************************
/*!\page vector_serialization Vector Serialization
//
// The following example demonstrates the (de-)serialization of dense and sparse vectors:

\code
using blaze::columnVector;
using blaze::rowVector;

// Serialization of both vectors
{
   blaze::StaticVector<double,5UL,rowVector> d;
   blaze::CompressedVector<int,columnVector> s;

   // ... Resizing and initialization

   // Creating an archive that writes into the file "vectors.blaze"
   blaze::Archive<std::ofstream> archive( "vectors.blaze" );

   // Serialization of both vectors into the same archive. Note that d lies before s!
   archive << d << s;
}

// Reconstitution of both vectors
{
   blaze::DynamicVector<double,rowVector> d1;
   blaze::DynamicVector<int,rowVector> d2;

   // Creating an archive that reads from the file "vectors.blaze"
   blaze::Archive<std::ifstream> archive( "vectors.blaze" );

   // Reconstituting the former d vector into d1. Note that it is possible to reconstitute
   // the vector into a different kind of vector (StaticVector -> DynamicVector), but that
   // the type of elements has to be the same.
   archive >> d1;

   // Reconstituting the former s vector into d2. Note that it is even possible to reconstitute
   // a sparse vector as a dense vector (also the reverse is possible) and that a column vector
   // can be reconstituted as a row vector (and vice versa). Note however that also in this case
   // the type of elements is the same!
   archive >> d2;
}
\endcode

// The (de-)serialization of vectors is not restricted to vectors of built-in data type, but can
// also be used for vectors with vector or matrix element type:

\code
// Serialization
{
   blaze::CompressedVector< blaze::DynamicVector< blaze::complex<double> > > vec;

   // ... Resizing and initialization

   // Creating an archive that writes into the file "vector.blaze"
   blaze::Archive<std::ofstream> archive( "vector.blaze" );

   // Serialization of the vector into the archive
   archive << vec;
}

// Deserialization
{
   blaze::CompressedVector< blaze::DynamicVector< blaze::complex<double> > > vec;

   // Creating an archive that reads from the file "vector.blaze"
   blaze::Archive<std::ifstream> archive( "vector.blaze" );

   // Reconstitution of the vector from the archive
   archive >> vec;
}
\endcode

// As the examples demonstrate, the vector serialization offers enormous flexibility.
// However,
// several actions result in errors:
//
//  - vectors cannot be reconstituted as matrices (and vice versa)
//  - the element type of the serialized and reconstituted vector must match, which means
//    that on the source and destination platform the general type (signed/unsigned integral
//    or floating point) and the size of the type must be exactly the same
//  - when reconstituting a \c StaticVector, its size must match the size of the serialized vector
//
// In case an error is encountered during (de-)serialization, a \c std::runtime_error exception
// is thrown.
//
// \n Previous: \ref serialization &nbsp; &nbsp; Next: \ref matrix_serialization
*/
//*************************************************************************************************


//**Matrix Serialization***************************************************************************
/*!\page matrix_serialization Matrix Serialization
//
// The serialization of matrices works in the same manner as the serialization of vectors. The
// following example demonstrates the (de-)serialization of dense and sparse matrices:

\code
using blaze::rowMajor;
using blaze::columnMajor;

// Serialization of both matrices
{
   blaze::StaticMatrix<double,3UL,5UL,rowMajor> D;
   blaze::CompressedMatrix<int,columnMajor> S;

   // ... Resizing and initialization

   // Creating an archive that writes into the file "matrices.blaze"
   blaze::Archive<std::ofstream> archive( "matrices.blaze" );

   // Serialization of both matrices into the same archive. Note that D lies before S!
   archive << D << S;
}

// Reconstitution of both matrices
{
   blaze::DynamicMatrix<double,rowMajor> D1;
   blaze::DynamicMatrix<int,rowMajor> D2;

   // Creating an archive that reads from the file "matrices.blaze"
   blaze::Archive<std::ifstream> archive( "matrices.blaze" );

   // Reconstituting the former D matrix into D1. Note that it is possible to reconstitute
   // the matrix into a different kind of matrix (StaticMatrix -> DynamicMatrix), but that
   // the type of elements has to be the same.
   archive >> D1;

   // Reconstituting the former S matrix into D2. Note that it is even possible to reconstitute
   // a sparse matrix as a dense matrix (also the reverse is possible) and that a column-major
   // matrix can be reconstituted as a row-major matrix (and vice versa). Note however that also
   // in this case the type of elements is the same!
   archive >> D2;
}
\endcode

// Note that also in case of matrices it is possible to (de-)serialize matrices with vector or
// matrix elements:

\code
// Serialization
{
   blaze::CompressedMatrix< blaze::DynamicMatrix< blaze::complex<double> > > mat;

   // ... Resizing and initialization
   // Creating an archive that writes into the file "matrix.blaze"
   blaze::Archive<std::ofstream> archive( "matrix.blaze" );

   // Serialization of the matrix into the archive
   archive << mat;
}

// Deserialization
{
   blaze::CompressedMatrix< blaze::DynamicMatrix< blaze::complex<double> > > mat;

   // Creating an archive that reads from the file "matrix.blaze"
   blaze::Archive<std::ifstream> archive( "matrix.blaze" );

   // Reconstitution of the matrix from the archive
   archive >> mat;
}
\endcode

// Note that just as the vector serialization, the matrix serialization is restricted by a
// few important rules:
//
//  - matrices cannot be reconstituted as vectors (and vice versa)
//  - the element type of the serialized and reconstituted matrix must match, which means
//    that on the source and destination platform the general type (signed/unsigned integral
//    or floating point) and the size of the type must be exactly the same
//  - when reconstituting a \c StaticMatrix, the number of rows and columns must match those
//    of the serialized matrix
//
// In case an error is encountered during (de-)serialization, a \c std::runtime_error exception
// is thrown.
//
// \n Previous: \ref vector_serialization &nbsp; &nbsp; Next: \ref blas_functions \n
*/
//*************************************************************************************************


//**BLAS Functions*********************************************************************************
/*!\page blas_functions BLAS Functions
//
// \tableofcontents
//
//
// For vector/vector, matrix/vector and matrix/matrix multiplications with large dense vectors
// and matrices, \b Blaze relies on the efficiency of BLAS libraries. For this purpose, \b Blaze
// implements convenient C++ wrapper functions for several BLAS functions. The following sections
// give a complete overview of all available BLAS level 1, 2 and 3 functions.
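// As a quick preview, the high-level wrappers can be called directly with \b Blaze vectors and
// matrices. The following is a minimal sketch, assuming a BLAS library is linked (the \c dotu()
// signatures are listed in the next section):

\code
blaze::DynamicVector<double> x{ 1.0, 2.0, 3.0 };
blaze::DynamicVector<double> y{ 4.0, 5.0, 6.0 };

const double d = blaze::dotu( x, y );  // Dot product via BLAS; results in the value 32
\endcode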
// // // \n \section blas_level_1 BLAS Level 1 // <hr> // // \subsection blas_level_1_dotu Dot Product (dotu) // // The following wrapper functions provide a generic interface for the BLAS functions for the // dot product of two dense vectors (\c sdot(), \c ddot(), \c cdotu_sub(), and \c zdotu_sub()): \code namespace blaze { float dotu( int n, const float* x, int incX, const float* y, int incY ); double dotu( int n, const double* x, int incX, const double* y, int incY ); complex<float> dotu( int n, const complex<float>* x, int incX, const complex<float>* y, int incY ); complex<double> dotu( int n, const complex<double>* x, int incX, const complex<double>* y, int incY ); template< typename VT1, bool TF1, typename VT2, bool TF2 > ElementType_<VT1> dotu( const DenseVector<VT1,TF1>& x, const DenseVector<VT2,TF2>& y ); } // namespace blaze \endcode // \subsection blas_level_1_dotc Complex Conjugate Dot Product (dotc) // // The following wrapper functions provide a generic interface for the BLAS functions for the // complex conjugate dot product of two dense vectors (\c sdot(), \c ddot(), \c cdotc_sub(), // and \c zdotc_sub()): \code namespace blaze { float dotc( int n, const float* x, int incX, const float* y, int incY ); double dotc( int n, const double* x, int incX, const double* y, int incY ); complex<float> dotc( int n, const complex<float>* x, int incX, const complex<float>* y, int incY ); complex<double> dotc( int n, const complex<double>* x, int incX, const complex<double>* y, int incY ); template< typename VT1, bool TF1, typename VT2, bool TF2 > ElementType_<VT1> dotc( const DenseVector<VT1,TF1>& x, const DenseVector<VT2,TF2>& y ); } // namespace blaze \endcode // \subsection blas_level_1_axpy Axpy Product (axpy) // // The following wrapper functions provide a generic interface for the BLAS functions for the // axpy product of two dense vectors (\c saxpy(), \c daxpy(), \c caxpy(), and \c zaxpy()): \code namespace blaze { void axpy( int n, float alpha, const float* x, int incX, float* y, int incY ); void axpy( int n, double alpha, const double* x, int incX, double* y, int incY ); void axpy( int n, complex<float> alpha, const complex<float>* x, int incX, complex<float>* y, int incY ); void axpy( int n, complex<double> alpha, const complex<double>* x, int incX, complex<double>* y, int incY ); template< typename VT1, bool TF1, typename VT2, bool TF2, typename ST > void axpy( const DenseVector<VT1,TF1>& x, const DenseVector<VT2,TF2>& y, ST alpha ); } // namespace blaze \endcode // \n \section blas_level_2 BLAS Level 2 // <hr> // // \subsection blas_level_2_gemv General Matrix/Vector Multiplication (gemv) // // The following wrapper functions provide a generic interface for the BLAS functions for the // general matrix/vector multiplication (\c sgemv(), \c dgemv(), \c cgemv(), and \c zgemv()): \code namespace blaze { void gemv( CBLAS_ORDER layout, CBLAS_TRANSPOSE transA, int m, int n, float alpha, const float* A, int lda, const float* x, int incX, float beta, float* y, int incY ); void gemv( CBLAS_ORDER layout, CBLAS_TRANSPOSE transA, int m, int n, double alpha, const double* A, int lda, const double* x, int incX, double beta, double* y, int incY ); void gemv( CBLAS_ORDER layout, CBLAS_TRANSPOSE transA, int m, int n, complex<float> alpha, const complex<float>* A, int lda, const complex<float>* x, int incX, complex<float> beta, complex<float>* y, int incY ); void gemv( CBLAS_ORDER layout, CBLAS_TRANSPOSE transA, int m, int n, complex<double> alpha, const complex<double>* A, int lda, const 
complex<double>* x, int incX, complex<double> beta, complex<double>* y, int incY );

template< typename VT1, typename MT1, bool SO, typename VT2, typename ST >
void gemv( DenseVector<VT1,false>& y, const DenseMatrix<MT1,SO>& A,
           const DenseVector<VT2,false>& x, ST alpha, ST beta );

template< typename VT1, typename VT2, typename MT1, bool SO, typename ST >
void gemv( DenseVector<VT1,true>& y, const DenseVector<VT2,true>& x,
           const DenseMatrix<MT1,SO>& A, ST alpha, ST beta );

} // namespace blaze
\endcode

// \n \subsection blas_level_2_trmv Triangular Matrix/Vector Multiplication (trmv)
//
// The following wrapper functions provide a generic interface for the BLAS functions for the
// matrix/vector multiplication with a triangular matrix (\c strmv(), \c dtrmv(), \c ctrmv(),
// and \c ztrmv()):

\code
namespace blaze {

void trmv( CBLAS_ORDER order, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag,
           int n, const float* A, int lda, float* x, int incX );

void trmv( CBLAS_ORDER order, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag,
           int n, const double* A, int lda, double* x, int incX );

void trmv( CBLAS_ORDER order, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag,
           int n, const complex<float>* A, int lda, complex<float>* x, int incX );

void trmv( CBLAS_ORDER order, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag,
           int n, const complex<double>* A, int lda, complex<double>* x, int incX );

template< typename VT, typename MT, bool SO >
void trmv( DenseVector<VT,false>& x, const DenseMatrix<MT,SO>& A, CBLAS_UPLO uplo );

template< typename VT, typename MT, bool SO >
void trmv( DenseVector<VT,true>& x, const DenseMatrix<MT,SO>& A, CBLAS_UPLO uplo );

} // namespace blaze
\endcode

// \n \section blas_level_3 BLAS Level 3
// <hr>
//
// \subsection blas_level_3_gemm General Matrix/Matrix Multiplication (gemm)
//
// The following wrapper functions provide a generic interface for the BLAS functions for the
// general matrix/matrix multiplication (\c sgemm(), \c dgemm(), \c cgemm(), and \c zgemm()):

\code
namespace blaze {

void gemm( CBLAS_ORDER order, CBLAS_TRANSPOSE transA, CBLAS_TRANSPOSE transB, int m, int n,
           int k, float alpha, const float* A, int lda, const float* B, int ldb,
           float beta, float* C, int ldc );

void gemm( CBLAS_ORDER order, CBLAS_TRANSPOSE transA, CBLAS_TRANSPOSE transB, int m, int n,
           int k, double alpha, const double* A, int lda, const double* B, int ldb,
           double beta, double* C, int ldc );

void gemm( CBLAS_ORDER order, CBLAS_TRANSPOSE transA, CBLAS_TRANSPOSE transB, int m, int n,
           int k, complex<float> alpha, const complex<float>* A, int lda,
           const complex<float>* B, int ldb, complex<float> beta, complex<float>* C, int ldc );

void gemm( CBLAS_ORDER order, CBLAS_TRANSPOSE transA, CBLAS_TRANSPOSE transB, int m, int n,
           int k, complex<double> alpha, const complex<double>* A, int lda,
           const complex<double>* B, int ldb, complex<double> beta, complex<double>* C, int ldc );

template< typename MT1, bool SO1, typename MT2, bool SO2, typename MT3, bool SO3, typename ST >
void gemm( DenseMatrix<MT1,SO1>& C, const DenseMatrix<MT2,SO2>& A,
           const DenseMatrix<MT3,SO3>& B, ST alpha, ST beta );

} // namespace blaze
\endcode

// \n \subsection blas_level_3_trmm Triangular Matrix/Matrix Multiplication (trmm)
//
// The following wrapper functions provide a generic interface for the BLAS functions for the
// matrix/matrix multiplication with a triangular matrix (\c strmm(), \c dtrmm(), \c ctrmm(), and
// \c ztrmm()):

\code
namespace blaze {

void trmm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo,
CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, int m, int n, float alpha, const float* A, int lda, float* B, int ldb ); void trmm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, int m, int n, double alpha, const double* A, int lda, double* B, int ldb ); void trmm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, int m, int n, complex<float> alpha, const complex<float>* A, int lda, complex<float>* B, int ldb ); void trmm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, int m, int n, complex<double> alpha, const complex<double>* A, int lda, complex<double>* B, int ldb ); template< typename MT1, bool SO1, typename MT2, bool SO2, typename ST > void trmm( DenseMatrix<MT1,SO1>& B, const DenseMatrix<MT2,SO2>& A, CBLAS_SIDE side, CBLAS_UPLO uplo, ST alpha ); } // namespace blaze \endcode // \n \subsection blas_level_3_trsm Triangular System Solver (trsm) // // The following wrapper functions provide a generic interface for the BLAS functions for solving // a triangular system of equations (\c strsm(), \c dtrsm(), \c ctrsm(), and \c ztrsm()): \code namespace blaze { void trsm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, int m, int n, float alpha, const float* A, int lda, float* B, int ldb ); void trsm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, int m, int n, double alpha, const double* A, int lda, double* B, int ldb ); void trsm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, int m, int n, complex<float> alpha, const complex<float>* A, int lda, complex<float>* B, int ldb ); void trsm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, int m, int n, complex<double> alpha, const complex<double>* A, int lda, complex<double>* B, int ldb ); template< typename MT, bool SO, typename VT, bool TF, typename ST > void trsm( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, CBLAS_SIDE side, CBLAS_UPLO uplo, ST alpha ); template< typename MT1, bool SO1, typename MT2, bool SO2, typename ST > void trsm( const DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, CBLAS_SIDE side, CBLAS_UPLO uplo, ST alpha ); } // namespace blaze \endcode // \n Previous: \ref matrix_serialization &nbsp; &nbsp; Next: \ref lapack_functions \n */ //************************************************************************************************* //**LAPACK Functions******************************************************************************* /*!\page lapack_functions LAPACK Functions // // \tableofcontents // // // The \b Blaze library makes extensive use of the LAPACK functionality for various compute tasks // (including the decomposition, inversion and the computation of the determinant of dense matrices). // For this purpose, \b Blaze implements several convenient C++ wrapper functions for all required // LAPACK functions. The following sections give a complete overview of all available LAPACK wrapper // functions. For more details on the individual LAPACK functions see the \b Blaze function // documentation or the LAPACK online documentation browser: // // http://www.netlib.org/lapack/explore-html/ // // \note All functions only work for general, non-adapted matrices with \c float, \c double, // \c complex<float>, or \c complex<double> element type. 
// The attempt to call the function with
// adaptors or matrices of any other element type results in a compile time error!
//
// \note All functions can only be used if a fitting LAPACK library is available and linked to
// the final executable. Otherwise a call to this function will result in a linker error.
//
// \note For performance reasons all functions only provide the basic exception safety guarantee,
// i.e. in case an exception is thrown the given matrix may already have been modified.
//
//
// \n \section lapack_decomposition Matrix Decomposition
// <hr>
//
// The following functions decompose/factorize the given dense matrix. Based on this decomposition
// the matrix can be inverted or used to solve a linear system of equations.
//
//
// \n \subsection lapack_lu_decomposition LU Decomposition
//
// The following functions provide an interface for the LAPACK functions \c sgetrf(), \c dgetrf(),
// \c cgetrf(), and \c zgetrf(), which compute the LU decomposition for the given general matrix:

\code
namespace blaze {

void getrf( int m, int n, float* A, int lda, int* ipiv, int* info );

void getrf( int m, int n, double* A, int lda, int* ipiv, int* info );

void getrf( int m, int n, complex<float>* A, int lda, int* ipiv, int* info );

void getrf( int m, int n, complex<double>* A, int lda, int* ipiv, int* info );

template< typename MT, bool SO >
void getrf( DenseMatrix<MT,SO>& A, int* ipiv );

} // namespace blaze
\endcode

// The decomposition has the form

\f[ A = P \cdot L \cdot U, \f]\n

// where \c P is a permutation matrix, \c L is a lower unitriangular matrix, and \c U is an upper
// triangular matrix. The resulting decomposition is stored within \a A: In case of a column-major
// matrix, \c L is stored in the lower part of \a A and \c U is stored in the upper part. The unit
// diagonal elements of \c L are not stored. In case \a A is a row-major matrix the result is
// transposed.
//
// \note The LU decomposition will never fail, even for singular matrices. However, in case of a
// singular matrix the resulting decomposition cannot be used for a matrix inversion or solving
// a linear system of equations.
//
//
// \n \subsection lapack_ldlt_decomposition LDLT Decomposition
//
// The following functions provide an interface for the LAPACK functions \c ssytrf(), \c dsytrf(),
// \c csytrf(), and \c zsytrf(), which compute the LDLT (Bunch-Kaufman) decomposition for the given
// symmetric indefinite matrix:

\code
namespace blaze {

void sytrf( char uplo, int n, float* A, int lda, int* ipiv, float* work, int lwork, int* info );

void sytrf( char uplo, int n, double* A, int lda, int* ipiv, double* work, int lwork, int* info );

void sytrf( char uplo, int n, complex<float>* A, int lda, int* ipiv, complex<float>* work, int lwork, int* info );

void sytrf( char uplo, int n, complex<double>* A, int lda, int* ipiv, complex<double>* work, int lwork, int* info );

template< typename MT, bool SO >
void sytrf( DenseMatrix<MT,SO>& A, char uplo, int* ipiv );

} // namespace blaze
\endcode

// The decomposition has the form

\f[ A = U D U^{T} \texttt{ (if uplo = 'U'), or } A = L D L^{T} \texttt{ (if uplo = 'L'), } \f]

// where \c U (or \c L) is a product of permutation and unit upper (lower) triangular matrices,
// and \c D is symmetric and block diagonal with 1-by-1 and 2-by-2 diagonal blocks.
// The resulting
// decomposition is stored within \a A: In case \a uplo is set to \c 'L' the result is stored in
// the lower part of the matrix and the upper part remains untouched, in case \a uplo is set to
// \c 'U' the result is stored in the upper part and the lower part remains untouched.
//
// \note The Bunch-Kaufman decomposition will never fail, even for singular matrices. However, in
// case of a singular matrix the resulting decomposition cannot be used for a matrix inversion or
// solving a linear system of equations.
//
//
// \n \subsection lapack_ldlh_decomposition LDLH Decomposition
//
// The following functions provide an interface for the LAPACK functions \c chetrf() and \c zhetrf(),
// which compute the LDLH (Bunch-Kaufman) decomposition for the given Hermitian indefinite matrix:

\code
namespace blaze {

void hetrf( char uplo, int n, complex<float>* A, int lda, int* ipiv, complex<float>* work, int lwork, int* info );

void hetrf( char uplo, int n, complex<double>* A, int lda, int* ipiv, complex<double>* work, int lwork, int* info );

template< typename MT, bool SO >
void hetrf( DenseMatrix<MT,SO>& A, char uplo, int* ipiv );

} // namespace blaze
\endcode

// The decomposition has the form

\f[ A = U D U^{H} \texttt{ (if uplo = 'U'), or } A = L D L^{H} \texttt{ (if uplo = 'L'), } \f]

// where \c U (or \c L) is a product of permutation and unit upper (lower) triangular matrices,
// and \c D is Hermitian and block diagonal with 1-by-1 and 2-by-2 diagonal blocks. The resulting
// decomposition is stored within \a A: In case \a uplo is set to \c 'L' the result is stored in
// the lower part of the matrix and the upper part remains untouched, in case \a uplo is set to
// \c 'U' the result is stored in the upper part and the lower part remains untouched.
//
// \note The Bunch-Kaufman decomposition will never fail, even for singular matrices. However, in
// case of a singular matrix the resulting decomposition cannot be used for a matrix inversion or
// solving a linear system of equations.
//
//
// \n \subsection lapack_llh_decomposition Cholesky Decomposition
//
// The following functions provide an interface for the LAPACK functions \c spotrf(), \c dpotrf(),
// \c cpotrf(), and \c zpotrf(), which compute the Cholesky (LLH) decomposition for the given
// positive definite matrix:

\code
namespace blaze {

void potrf( char uplo, int n, float* A, int lda, int* info );

void potrf( char uplo, int n, double* A, int lda, int* info );

void potrf( char uplo, int n, complex<float>* A, int lda, int* info );

void potrf( char uplo, int n, complex<double>* A, int lda, int* info );

template< typename MT, bool SO >
void potrf( DenseMatrix<MT,SO>& A, char uplo );

} // namespace blaze
\endcode

// The decomposition has the form

\f[ A = U^{T} U \texttt{ (if uplo = 'U'), or } A = L L^{T} \texttt{ (if uplo = 'L'), } \f]

// where \c U is an upper triangular matrix and \c L is a lower triangular matrix. The Cholesky
// decomposition fails if the given matrix \a A is not a positive definite matrix. In this case
// a \c std::invalid_argument exception is thrown.
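//
// For illustration, a call to the high-level wrapper might look as follows (a minimal sketch
// with an arbitrary positive definite matrix):

\code
blaze::StaticMatrix<double,2UL,2UL,blaze::columnMajor> A{ { 4.0, 2.0 },
                                                          { 2.0, 3.0 } };

blaze::potrf( A, 'L' );  // On exit, the lower part of A contains L (A = L * L^T); a
                         // std::invalid_argument exception is thrown if A is not positive definite
\endcode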
//
//
// \n \subsection lapack_qr_decomposition QR Decomposition
//
// The following functions provide an interface for the LAPACK functions \c sgeqrf(), \c dgeqrf(),
// \c cgeqrf(), and \c zgeqrf(), which compute the QR decomposition of the given general matrix:

\code
namespace blaze {

void geqrf( int m, int n, float* A, int lda, float* tau, float* work, int lwork, int* info );

void geqrf( int m, int n, double* A, int lda, double* tau, double* work, int lwork, int* info );

void geqrf( int m, int n, complex<float>* A, int lda, complex<float>* tau, complex<float>* work, int lwork, int* info );

void geqrf( int m, int n, complex<double>* A, int lda, complex<double>* tau, complex<double>* work, int lwork, int* info );

template< typename MT, bool SO >
void geqrf( DenseMatrix<MT,SO>& A, typename MT::ElementType* tau );

} // namespace blaze
\endcode

// The decomposition has the form

\f[ A = Q \cdot R, \f]

// where \c Q is represented as a product of elementary reflectors

\f[ Q = H(1) H(2) . . . H(k) \texttt{, with k = min(m,n).} \f]

// Each H(i) has the form

\f[ H(i) = I - tau \cdot v \cdot v^T, \f]

// where \c tau is a real scalar, and \c v is a real vector with <tt>v(0:i-1) = 0</tt> and
// <tt>v(i) = 1</tt>. <tt>v(i+1:m)</tt> is stored on exit in <tt>A(i+1:m,i)</tt>, and \c tau
// in \c tau(i). Thus on exit the elements on and above the diagonal of the matrix contain the
// min(\a m,\a n)-by-\a n upper trapezoidal matrix \c R (\c R is upper triangular if \a m >= \a n);
// the elements below the diagonal, with the array \c tau, represent the orthogonal matrix \c Q as
// a product of min(\a m,\a n) elementary reflectors.
//
// The following functions provide an interface for the LAPACK functions \c sorgqr(), \c dorgqr(),
// \c cungqr(), and \c zungqr(), which reconstruct the \c Q matrix from a QR decomposition:

\code
namespace blaze {

void orgqr( int m, int n, int k, float* A, int lda, const float* tau, float* work, int lwork, int* info );

void orgqr( int m, int n, int k, double* A, int lda, const double* tau, double* work, int lwork, int* info );

void ungqr( int m, int n, int k, complex<float>* A, int lda, const complex<float>* tau, complex<float>* work, int lwork, int* info );

void ungqr( int m, int n, int k, complex<double>* A, int lda, const complex<double>* tau, complex<double>* work, int lwork, int* info );

template< typename MT, bool SO >
void orgqr( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau );

template< typename MT, bool SO >
void ungqr( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau );

} // namespace blaze
\endcode

// The following functions provide an interface for the LAPACK functions \c sormqr(), \c dormqr(),
// \c cunmqr(), and \c zunmqr(), which can be used to multiply a matrix with the \c Q matrix from
// a QR decomposition:

\code
namespace blaze {

void ormqr( char side, char trans, int m, int n, int k, const float* A, int lda, const float* tau, float* C, int ldc, float* work, int lwork, int* info );

void ormqr( char side, char trans, int m, int n, int k, const double* A, int lda, const double* tau, double* C, int ldc, double* work, int lwork, int* info );

void unmqr( char side, char trans, int m, int n, int k, const complex<float>* A, int lda, const complex<float>* tau, complex<float>* C, int ldc, complex<float>* work, int lwork, int* info );

void unmqr( char side, char trans, int m, int n, int k, const complex<double>* A, int lda, const complex<double>* tau, complex<double>* C, int ldc, complex<double>* work, int lwork, int* info );

template<
typename MT1, bool SO1, typename MT2, bool SO2 > void ormqr( DenseMatrix<MT1,SO1>& C, const DenseMatrix<MT2,SO2>& A, char side, char trans, const ElementType_<MT2>* tau ); template< typename MT1, bool SO, typename MT2 > void unmqr( DenseMatrix<MT1,SO>& C, DenseMatrix<MT2,SO>& A, char side, char trans, ElementType_<MT2>* tau ); } // namespace blaze \endcode // \n \subsection lapack_rq_decomposition RQ Decomposition // // The following functions provide an interface for the LAPACK functions \c sgerqf(), \c dgerqf(), // \c cgerqf(), and \c zgerqf(), which compute the RQ decomposition of the given general matrix: \code namespace blaze { void gerqf( int m, int n, float* A, int lda, float* tau, float* work, int lwork, int* info ); void gerqf( int m, int n, double* A, int lda, double* tau, double* work, int lwork, int* info ); void gerqf( int m, int n, complex<float>* A, int lda, complex<float>* tau, complex<float>* work, int lwork, int* info ); void gerqf( int m, int n, complex<double>* A, int lda, complex<double>* tau, complex<double>* work, int lwork, int* info ); template< typename MT, bool SO > void gerqf( DenseMatrix<MT,SO>& A, typename MT::ElementType* tau ); } // namespace blaze \endcode // The decomposition has the form \f[ A = R \cdot Q, \f] // where the \c Q is represented as a product of elementary reflectors \f[ Q = H(1) H(2) . . . H(k) \texttt{, with k = min(m,n).} \f] // Each H(i) has the form \f[ H(i) = I - tau \cdot v \cdot v^T, \f] // where \c tau is a real scalar, and \c v is a real vector with <tt>v(n-k+i+1:n) = 0</tt> and // <tt>v(n-k+i) = 1</tt>. <tt>v(1:n-k+i-1)</tt> is stored on exit in <tt>A(m-k+i,1:n-k+i-1)</tt>, // and \c tau in \c tau(i). Thus in case \a m <= \a n, the upper triangle of the subarray // <tt>A(1:m,n-m+1:n)</tt> contains the \a m-by-\a m upper triangular matrix \c R and in case // \a m >= \a n, the elements on and above the (\a m-\a n)-th subdiagonal contain the \a m-by-\a n // upper trapezoidal matrix \c R; the remaining elements in combination with the array \c tau // represent the orthogonal matrix \c Q as a product of min(\a m,\a n) elementary reflectors. 
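// A minimal usage sketch of the dense matrix overload of \c gerqf() (the matrix size and the use
// of a \c std::vector for \c tau are illustrative assumptions):

\code
blaze::DynamicMatrix<double,blaze::columnMajor> A( 8UL, 5UL );
// ... Initialization of A

std::vector<double> tau( 5UL );  // min(m,n) scalar factors of the elementary reflectors
blaze::gerqf( A, tau.data() );   // In-place RQ decomposition; R and the reflectors overwrite A
\endcode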
// // The following functions provide an interface for the LAPACK functions \c sorgrq(), \c dorgrq(), // \c cungrq(), and \c zungrq(), which reconstruct the \c Q matrix from an RQ decomposition: \code namespace blaze { void orgrq( int m, int n, int k, float* A, int lda, const float* tau, float* work, int lwork, int* info ); void orgrq( int m, int n, int k, double* A, int lda, const double* tau, double* work, int lwork, int* info ); void ungrq( int m, int n, int k, complex<float>* A, int lda, const complex<float>* tau, complex<float>* work, int lwork, int* info ); void ungrq( int m, int n, int k, complex<double>* A, int lda, const complex<double>* tau, complex<double>* work, int lwork, int* info ); template< typename MT, bool SO > void orgrq( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau ); template< typename MT, bool SO > void ungrq( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau ); } // namespace blaze \endcode // The following functions provide an interface for the LAPACK functions \c sormrq(), \c dormrq(), // \c cunmrq(), and \c zunmrq(), which can be used to multiply a matrix with the \c Q matrix from // an RQ decomposition: \code namespace blaze { void ormrq( char side, char trans, int m, int n, int k, const float* A, int lda, const float* tau, float* C, int ldc, float* work, int lwork, int* info ); void ormrq( char side, char trans, int m, int n, int k, const double* A, int lda, const double* tau, double* C, int ldc, double* work, int lwork, int* info ); void unmrq( char side, char trans, int m, int n, int k, const complex<float>* A, int lda, const complex<float>* tau, complex<float>* C, int ldc, complex<float>* work, int lwork, int* info ); void unmrq( char side, char trans, int m, int n, int k, const complex<double>* A, int lda, const complex<double>* tau, complex<double>* C, int ldc, complex<double>* work, int lwork, int* info ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void ormrq( DenseMatrix<MT1,SO1>& C, const DenseMatrix<MT2,SO2>& A, char side, char trans, const ElementType_<MT2>* tau ); template< typename MT1, bool SO, typename MT2 > void unmrq( DenseMatrix<MT1,SO>& C, DenseMatrix<MT2,SO>& A, char side, char trans, ElementType_<MT2>* tau ); } // namespace blaze \endcode // \n \subsection lapack_ql_decomposition QL Decomposition // // The following functions provide an interface for the LAPACK functions \c sgeqlf(), \c dgeqlf(), // \c cgeqlf(), and \c zgeqlf(), which compute the QL decomposition of the given general matrix: \code namespace blaze { void geqlf( int m, int n, float* A, int lda, float* tau, float* work, int lwork, int* info ); void geqlf( int m, int n, double* A, int lda, double* tau, double* work, int lwork, int* info ); void geqlf( int m, int n, complex<float>* A, int lda, complex<float>* tau, complex<float>* work, int lwork, int* info ); void geqlf( int m, int n, complex<double>* A, int lda, complex<double>* tau, complex<double>* work, int lwork, int* info ); template< typename MT, bool SO > void geqlf( DenseMatrix<MT,SO>& A, typename MT::ElementType* tau ); } // namespace blaze \endcode // The decomposition has the form \f[ A = Q \cdot L, \f] // where the \c Q is represented as a product of elementary reflectors \f[ Q = H(k) . . . H(2) H(1) \texttt{, with k = min(m,n).} \f] // Each H(i) has the form \f[ H(i) = I - tau \cdot v \cdot v^T, \f] // where \c tau is a real scalar, and \c v is a real vector with <tt>v(m-k+i+1:m) = 0</tt> and // <tt>v(m-k+i) = 1</tt>.
<tt>v(1:m-k+i-1)</tt> is stored on exit in <tt>A(1:m-k+i-1,n-k+i)</tt>, // and \c tau in \c tau(i). Thus in case \a m >= \a n, the lower triangle of the subarray // A(m-n+1:m,1:n) contains the \a n-by-\a n lower triangular matrix \c L and in case \a m <= \a n, // the elements on and below the (\a n-\a m)-th subdiagonal contain the \a m-by-\a n lower // trapezoidal matrix \c L; the remaining elements in combination with the array \c tau represent // the orthogonal matrix \c Q as a product of min(\a m,\a n) elementary reflectors. // // The following functions provide an interface for the LAPACK functions \c sorgql(), \c dorgql(), // \c cungql(), and \c zungql(), which reconstruct the \c Q matrix from a QL decomposition: \code namespace blaze { void orgql( int m, int n, int k, float* A, int lda, const float* tau, float* work, int lwork, int* info ); void orgql( int m, int n, int k, double* A, int lda, const double* tau, double* work, int lwork, int* info ); void ungql( int m, int n, int k, complex<float>* A, int lda, const complex<float>* tau, complex<float>* work, int lwork, int* info ); void ungql( int m, int n, int k, complex<double>* A, int lda, const complex<double>* tau, complex<double>* work, int lwork, int* info ); template< typename MT, bool SO > void orgql( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau ); template< typename MT, bool SO > void ungql( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau ); } // namespace blaze \endcode // The following functions provide an interface for the LAPACK functions \c sormql(), \c dormql(), // \c cunmql(), and \c zunmql(), which can be used to multiply a matrix with the \c Q matrix from // a QL decomposition: \code namespace blaze { void ormql( char side, char trans, int m, int n, int k, const float* A, int lda, const float* tau, float* C, int ldc, float* work, int lwork, int* info ); void ormql( char side, char trans, int m, int n, int k, const double* A, int lda, const double* tau, double* C, int ldc, double* work, int lwork, int* info ); void unmql( char side, char trans, int m, int n, int k, const complex<float>* A, int lda, const complex<float>* tau, complex<float>* C, int ldc, complex<float>* work, int lwork, int* info ); void unmql( char side, char trans, int m, int n, int k, const complex<double>* A, int lda, const complex<double>* tau, complex<double>* C, int ldc, complex<double>* work, int lwork, int* info ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void ormql( DenseMatrix<MT1,SO1>& C, const DenseMatrix<MT2,SO2>& A, char side, char trans, const ElementType_<MT2>* tau ); template< typename MT1, bool SO, typename MT2 > void unmql( DenseMatrix<MT1,SO>& C, DenseMatrix<MT2,SO>& A, char side, char trans, ElementType_<MT2>* tau ); } // namespace blaze \endcode // \n \subsection lapack_lq_decomposition LQ Decomposition // // The following functions provide an interface for the LAPACK functions \c sgelqf(), \c dgelqf(), // \c cgelqf(), and \c zgelqf(), which compute the LQ decomposition of the given general matrix: \code namespace blaze { void gelqf( int m, int n, float* A, int lda, float* tau, float* work, int lwork, int* info ); void gelqf( int m, int n, double* A, int lda, double* tau, double* work, int lwork, int* info ); void gelqf( int m, int n, complex<float>* A, int lda, complex<float>* tau, complex<float>* work, int lwork, int* info ); void gelqf( int m, int n, complex<double>* A, int lda, complex<double>* tau, complex<double>* work, int lwork, int* info ); template< typename MT, bool SO > void
gelqf( DenseMatrix<MT,SO>& A, typename MT::ElementType* tau ); } // namespace blaze \endcode // The decomposition has the form \f[ A = L \cdot Q, \f] // where the \c Q is represented as a product of elementary reflectors \f[ Q = H(k) . . . H(2) H(1) \texttt{, with k = min(m,n).} \f] // Each H(i) has the form \f[ H(i) = I - tau \cdot v \cdot v^T, \f] // where \c tau is a real scalar, and \c v is a real vector with <tt>v(0:i-1) = 0</tt> and // <tt>v(i) = 1</tt>. <tt>v(i+1:n)</tt> is stored on exit in <tt>A(i,i+1:n)</tt>, and \c tau // in \c tau(i). Thus on exit the elements on and below the diagonal of the matrix contain the // \a m-by-min(\a m,\a n) lower trapezoidal matrix \c L (\c L is lower triangular if \a m <= \a n); // the elements above the diagonal, with the array \c tau, represent the orthogonal matrix \c Q // as a product of min(\a m,\a n) elementary reflectors. // // The following functions provide an interface for the LAPACK functions \c sorglq(), \c dorglq(), // \c cunglq(), and \c zunglq(), which reconstruct the \c Q matrix from an LQ decomposition: \code namespace blaze { void orglq( int m, int n, int k, float* A, int lda, const float* tau, float* work, int lwork, int* info ); void orglq( int m, int n, int k, double* A, int lda, const double* tau, double* work, int lwork, int* info ); void unglq( int m, int n, int k, complex<float>* A, int lda, const complex<float>* tau, complex<float>* work, int lwork, int* info ); void unglq( int m, int n, int k, complex<double>* A, int lda, const complex<double>* tau, complex<double>* work, int lwork, int* info ); template< typename MT, bool SO > void orglq( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau ); template< typename MT, bool SO > void unglq( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau ); } // namespace blaze \endcode // The following functions provide an interface for the LAPACK functions \c sormlq(), \c dormlq(), // \c cunmlq(), and \c zunmlq(), which can be used to multiply a matrix with the \c Q matrix from // an LQ decomposition: \code namespace blaze { void ormlq( char side, char trans, int m, int n, int k, const float* A, int lda, const float* tau, float* C, int ldc, float* work, int lwork, int* info ); void ormlq( char side, char trans, int m, int n, int k, const double* A, int lda, const double* tau, double* C, int ldc, double* work, int lwork, int* info ); void unmlq( char side, char trans, int m, int n, int k, const complex<float>* A, int lda, const complex<float>* tau, complex<float>* C, int ldc, complex<float>* work, int lwork, int* info ); void unmlq( char side, char trans, int m, int n, int k, const complex<double>* A, int lda, const complex<double>* tau, complex<double>* C, int ldc, complex<double>* work, int lwork, int* info ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void ormlq( DenseMatrix<MT1,SO1>& C, const DenseMatrix<MT2,SO2>& A, char side, char trans, const ElementType_<MT2>* tau ); template< typename MT1, bool SO, typename MT2 > void unmlq( DenseMatrix<MT1,SO>& C, DenseMatrix<MT2,SO>& A, char side, char trans, ElementType_<MT2>* tau ); } // namespace blaze \endcode // \n \section lapack_inversion Matrix Inversion // <hr> // // Given a matrix that has already been decomposed, the following functions can be used to invert // the matrix in-place.
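// For instance, an in-place inversion via the LU-based route described below may be sketched as
// follows (a sketch only; the matrix size and contents are assumptions, and \c getrf() refers to
// the decomposition function documented in \ref lapack_lu_decomposition):

\code
blaze::DynamicMatrix<double,blaze::columnMajor> A( 4UL, 4UL );
// ... Initialization of A with a square, invertible matrix (assumed)

std::vector<int> ipiv( 4UL );    // Pivot indices of the LU decomposition
blaze::getrf( A, ipiv.data() );  // LU decomposition (see \ref lapack_lu_decomposition)
blaze::getri( A, ipiv.data() );  // In-place inversion based on the LU decomposition
\endcode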
// // // \n \subsection lapack_lu_inversion LU-based Inversion // // The following functions provide an interface for the LAPACK functions \c sgetri(), \c dgetri(), // \c cgetri(), and \c zgetri(), which invert a general matrix that has already been decomposed by // an \ref lapack_lu_decomposition : \code namespace blaze { void getri( int n, float* A, int lda, const int* ipiv, float* work, int lwork, int* info ); void getri( int n, double* A, int lda, const int* ipiv, double* work, int lwork, int* info ); void getri( int n, complex<float>* A, int lda, const int* ipiv, complex<float>* work, int lwork, int* info ); void getri( int n, complex<double>* A, int lda, const int* ipiv, complex<double>* work, int lwork, int* info ); template< typename MT, bool SO > void getri( DenseMatrix<MT,SO>& A, const int* ipiv ); } // namespace blaze \endcode // The functions fail if ... // // - ... the given matrix is not a square matrix; // - ... the given matrix is singular and not invertible. // // The first four functions report failure via the \c info argument, the fifth function throws a // \a std::invalid_argument exception in case of an error. // // // \n \subsection lapack_ldlt_inversion LDLT-based Inversion // // The following functions provide an interface for the LAPACK functions \c ssytri(), \c dsytri(), // \c csytri(), and \c zsytri(), which invert a symmetric indefinite matrix that has already been // decomposed by an \ref lapack_ldlt_decomposition : \code namespace blaze { void sytri( char uplo, int n, float* A, int lda, const int* ipiv, float* work, int* info ); void sytri( char uplo, int n, double* A, int lda, const int* ipiv, double* work, int* info ); void sytri( char uplo, int n, complex<float>* A, int lda, const int* ipiv, complex<float>* work, int* info ); void sytri( char uplo, int n, complex<double>* A, int lda, const int* ipiv, complex<double>* work, int* info ); template< typename MT, bool SO > void sytri( DenseMatrix<MT,SO>& A, char uplo, const int* ipiv ); } // namespace blaze \endcode // The functions fail if ... // // - ... the given matrix is not a square matrix; // - ... the given matrix is singular and not invertible. // // The first four functions report failure via the \c info argument, the fifth function throws a // \a std::invalid_argument exception in case of an error. // // // \n \subsection lapack_ldlh_inversion LDLH-based Inversion // // The following functions provide an interface for the LAPACK functions \c chetri() and // \c zhetri(), which invert an Hermitian indefinite matrix that has already been decomposed by // an \ref lapack_ldlh_decomposition : \code namespace blaze { void hetri( char uplo, int n, complex<float>* A, int lda, const int* ipiv, complex<float>* work, int* info ); void hetri( char uplo, int n, complex<double>* A, int lda, const int* ipiv, complex<double>* work, int* info ); template< typename MT, bool SO > void hetri( DenseMatrix<MT,SO>& A, char uplo, const int* ipiv ); } // namespace blaze \endcode // The functions fail if ... // // - ... the given matrix is not a square matrix; // - ... the given matrix is singular and not invertible. // // The first two functions report failure via the \c info argument, the third function throws a // \a std::invalid_argument exception in case of an error.
// // // \n \subsection lapack_llh_inversion Cholesky-based Inversion // // The following functions provide an interface for the LAPACK functions \c spotri(), \c dpotri(), // \c cpotri(), and \c zpotri(), which invert a positive definite matrix that has already been // decomposed by an \ref lapack_llh_decomposition : \code namespace blaze { void potri( char uplo, int n, float* A, int lda, int* info ); void potri( char uplo, int n, double* A, int lda, int* info ); void potri( char uplo, int n, complex<float>* A, int lda, int* info ); void potri( char uplo, int n, complex<double>* A, int lda, int* info ); template< typename MT, bool SO > void potri( DenseMatrix<MT,SO>& A, char uplo ); } // namespace blaze \endcode // The functions fail if ... // // - ... the given matrix is not a square matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - ... the given matrix is singular and not invertible. // // The first four functions report failure via the \c info argument, the fifth function throws a // \a std::invalid_argument exception in case of an error. // // // \n \subsection lapack_triangular_inversion Inversion of Triangular Matrices // // The following functions provide an interface for the LAPACK functions \c strtri(), \c dtrtri(), // \c ctrtri(), and \c ztrtri(), which invert the given triangular matrix in-place: \code namespace blaze { void trtri( char uplo, char diag, int n, float* A, int lda, int* info ); void trtri( char uplo, char diag, int n, double* A, int lda, int* info ); void trtri( char uplo, char diag, int n, complex<float>* A, int lda, int* info ); void trtri( char uplo, char diag, int n, complex<double>* A, int lda, int* info ); template< typename MT, bool SO > void trtri( DenseMatrix<MT,SO>& A, char uplo, char diag ); } // namespace blaze \endcode // The functions fail if ... // // - ... the given matrix is not a square matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - ... the given \a diag argument is neither 'U' nor 'N'; // - ... the given matrix is singular and not invertible. // // The first four functions report failure via the \c info argument, the fifth function throws a // \a std::invalid_argument exception in case of an error. // // // \n \section lapack_substitution Substitution // <hr> // // Given a matrix that has already been decomposed, the following functions can be used to perform // the forward/backward substitution step to compute the solution to a system of linear equations. // Note that depending on the storage order of the system matrix and the given right-hand side the // functions solve different equation systems: // // Single right-hand side: // - \f$ A *x=b \f$ if \a A is column-major // - \f$ A^T*x=b \f$ if \a A is row-major // // Multiple right-hand sides: // - \f$ A *X =B \f$ if both \a A and \a B are column-major // - \f$ A^T*X =B \f$ if \a A is row-major and \a B is column-major // - \f$ A *X^T=B^T \f$ if \a A is column-major and \a B is row-major // - \f$ A^T*X^T=B^T \f$ if both \a A and \a B are row-major // // In this context the general system matrix \a A is an n-by-n matrix that has already been // factorized by the according decomposition function, \a x and \a b are n-dimensional vectors // and \a X and \a B are either row-major m-by-n matrices or column-major n-by-m matrices.
// // // \n \subsection lapack_lu_substitution LU-based Substitution // // The following functions provide an interface for the LAPACK functions \c sgetrs(), \c dgetrs(), // \c cgetrs(), and \c zgetrs(), which perform the substitution step for a general matrix that has // already been decomposed by an \ref lapack_lu_decomposition : \code namespace blaze { void getrs( char trans, int n, int nrhs, const float* A, int lda, const int* ipiv, float* B, int ldb, int* info ); void getrs( char trans, int n, int nrhs, const double* A, int lda, const int* ipiv, double* B, int ldb, int* info ); void getrs( char trans, int n, int nrhs, const complex<float>* A, int lda, const int* ipiv, complex<float>* B, int ldb, int* info ); void getrs( char trans, int n, int nrhs, const complex<double>* A, int lda, const int* ipiv, complex<double>* B, int ldb, int* info ); template< typename MT, bool SO, typename VT, bool TF > void getrs( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char trans, const int* ipiv ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void getrs( const DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char trans, const int* ipiv ); } // namespace blaze \endcode // If the function exits successfully, the vector \a b or the matrix \a B contain the solution(s) // of the linear system of equations. The function fails if ... // // - ... the given system matrix is not a square matrix; // - ... the given \a trans argument is neither 'N' nor 'T' nor 'C'; // - ... the sizes of the two given matrices do not match. // // The first four functions report failure via the \c info argument, the last two functions throw // a \a std::invalid_argument exception in case of an error. // // // \n \subsection lapack_ldlt_substitution LDLT-based Substitution // // The following functions provide an interface for the LAPACK functions \c ssytrs(), \c dsytrs(), // \c csytrs(), and \c zsytrs(), which perform the substitution step for a symmetric indefinite // matrix that has already been decomposed by an \ref lapack_ldlt_decomposition : \code namespace blaze { void sytrs( char uplo, int n, int nrhs, const float* A, int lda, const int* ipiv, float* B, int ldb, int* info ); void sytrs( char uplo, int n, int nrhs, const double* A, int lda, const int* ipiv, double* B, int ldb, int* info ); void sytrs( char uplo, int n, int nrhs, const complex<float>* A, int lda, const int* ipiv, complex<float>* B, int ldb, int* info ); void sytrs( char uplo, int n, int nrhs, const complex<double>* A, int lda, const int* ipiv, complex<double>* B, int ldb, int* info ); template< typename MT, bool SO, typename VT, bool TF > void sytrs( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, const int* ipiv ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void sytrs( const DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo, const int* ipiv ); } // namespace blaze \endcode // If the function exits successfully, the vector \a b or the matrix \a B contain the solution(s) // of the linear system of equations. The function fails if ... // // - ... the given system matrix is not a square matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - ... the sizes of the two given matrices do not match. // // The first four functions report failure via the \c info argument, the last two functions throw // a \a std::invalid_argument exception in case of an error.
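// A minimal sketch of a complete LDLT-based solve, i.e. a decomposition via \c sytrf() (see
// \ref lapack_ldlt_decomposition) followed by the substitution step via \c sytrs() (matrix
// contents and sizes are illustrative assumptions):

\code
blaze::DynamicMatrix<double,blaze::columnMajor> A( 4UL, 4UL );  // The symmetric indefinite system matrix (assumed)
blaze::DynamicVector<double,blaze::columnVector> b( 4UL );      // The right-hand side vector
// ... Initialization of A and b

std::vector<int> ipiv( 4UL );            // Pivot indices of the decomposition
blaze::sytrf( A, 'L', ipiv.data() );     // LDLT (Bunch-Kaufman) decomposition
blaze::sytrs( A, b, 'L', ipiv.data() );  // Substitution step; b now contains the solution
\endcode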
// // // \n \subsection lapack_ldlh_substitution LDLH-based Substitution // // The following functions provide an interface for the LAPACK functions \c chetrs() and \c zhetrs(), // which perform the substitution step for an Hermitian indefinite matrix that has already been // decomposed by an \ref lapack_ldlh_decomposition : \code namespace blaze { void hetrs( char uplo, int n, int nrhs, const complex<float>* A, int lda, const int* ipiv, complex<float>* B, int ldb, int* info ); void hetrs( char uplo, int n, int nrhs, const complex<double>* A, int lda, const int* ipiv, complex<double>* B, int ldb, int* info ); template< typename MT, bool SO, typename VT, bool TF > void hetrs( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, const int* ipiv ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void hetrs( const DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo, const int* ipiv ); } // namespace blaze \endcode // If the function exits successfully, the vector \a b or the matrix \a B contain the solution(s) // of the linear system of equations. The function fails if ... // // - ... the given system matrix is not a square matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - ... the sizes of the two given matrices do not match. // // The first two functions report failure via the \c info argument, the last two functions throw // a \a std::invalid_argument exception in case of an error. // // // \n \subsection lapack_llh_substitution Cholesky-based Substitution // // The following functions provide an interface for the LAPACK functions \c spotrs(), \c dpotrs(), // \c cpotrs(), and \c zpotrs(), which perform the substitution step for a positive definite matrix // that has already been decomposed by an \ref lapack_llh_decomposition : \code namespace blaze { void potrs( char uplo, int n, int nrhs, const float* A, int lda, float* B, int ldb, int* info ); void potrs( char uplo, int n, int nrhs, const double* A, int lda, double* B, int ldb, int* info ); void potrs( char uplo, int n, int nrhs, const complex<float>* A, int lda, complex<float>* B, int ldb, int* info ); void potrs( char uplo, int n, int nrhs, const complex<double>* A, int lda, complex<double>* B, int ldb, int* info ); template< typename MT, bool SO, typename VT, bool TF > void potrs( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void potrs( const DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo ); } // namespace blaze \endcode // If the function exits successfully, the vector \a b or the matrix \a B contain the solution(s) // of the linear system of equations. The function fails if ... // // - ... the given system matrix is not a square matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - ... the sizes of the two given matrices do not match. // // The first four functions report failure via the \c info argument, the last two functions throw // a \a std::invalid_argument exception in case of an error.
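// Analogously, a complete Cholesky-based solve consists of an \ref lapack_llh_decomposition
// followed by this substitution step (a sketch, assuming a positive definite system matrix):

\code
blaze::DynamicMatrix<double,blaze::columnMajor> A( 3UL, 3UL );  // The positive definite system matrix (assumed)
blaze::DynamicVector<double,blaze::columnVector> b( 3UL );      // The right-hand side vector
// ... Initialization of A and b

blaze::potrf( A, 'U' );     // Cholesky decomposition
blaze::potrs( A, b, 'U' );  // Substitution step; b now contains the solution
\endcode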
// // // \n \subsection lapack_triangular_substitution Substitution for Triangular Matrices // // The following functions provide an interface for the LAPACK functions \c strtrs(), \c dtrtrs(), // \c ctrtrs(), and \c ztrtrs(), which perform the substitution step for a triangular matrix: \code namespace blaze { void trtrs( char uplo, char trans, char diag, int n, int nrhs, const float* A, int lda, float* B, int ldb, int* info ); void trtrs( char uplo, char trans, char diag, int n, int nrhs, const double* A, int lda, double* B, int ldb, int* info ); void trtrs( char uplo, char trans, char diag, int n, int nrhs, const complex<float>* A, int lda, complex<float>* B, int ldb, int* info ); void trtrs( char uplo, char trans, char diag, int n, int nrhs, const complex<double>* A, int lda, complex<double>* B, int ldb, int* info ); template< typename MT, bool SO, typename VT, bool TF > void trtrs( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, char trans, char diag ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void trtrs( const DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo, char trans, char diag ); } // namespace blaze \endcode // If the function exits successfully, the vector \a b or the matrix \a B contain the solution(s) // of the linear system of equations. The function fails if ... // // - ... the given system matrix is not a square matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - ... the given \a trans argument is neither 'N' nor 'T' nor 'C'; // - ... the given \a diag argument is neither 'U' nor 'N'; // - ... the sizes of the two given matrices do not match. // // The first four functions report failure via the \c info argument, the last two functions throw // a \a std::invalid_argument exception in case of an error. // // // \n \section lapack_linear_system_solver Linear System Solver // <hr> // // The following functions represent compound functions that perform both the decomposition step // as well as the substitution step to compute the solution to a system of linear equations. Note // that depending on the storage order of the system matrix and the given right-hand side the // functions solve different equation systems: // // Single right-hand side: // - \f$ A *x=b \f$ if \a A is column-major // - \f$ A^T*x=b \f$ if \a A is row-major // // Multiple right-hand sides: // - \f$ A *X =B \f$ if both \a A and \a B are column-major // - \f$ A^T*X =B \f$ if \a A is row-major and \a B is column-major // - \f$ A *X^T=B^T \f$ if \a A is column-major and \a B is row-major // - \f$ A^T*X^T=B^T \f$ if both \a A and \a B are row-major // // In this context the general system matrix \a A is an n-by-n matrix, \a x and \a b are // n-dimensional vectors and \a X and \a B are either row-major m-by-n matrices or column-major // n-by-m matrices.
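// As a concrete illustration of these storage-order semantics, consider the LU-based solver
// \c gesv() introduced below (a sketch; the matrix contents are assumptions):

\code
blaze::DynamicMatrix<double,blaze::rowMajor> A( 3UL, 3UL );  // Row-major system matrix
blaze::DynamicVector<double,blaze::columnVector> b( 3UL );   // Right-hand side vector
// ... Initialization of A and b

std::vector<int> ipiv( 3UL );
blaze::gesv( A, b, ipiv.data() );  // Solves trans(A)*x=b, since A is row-major
\endcode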
// // // \subsection lapack_lu_linear_system_solver LU-based Linear System Solver // // The following functions provide an interface for the LAPACK functions \c sgesv(), \c dgesv(), // \c cgesv(), and \c zgesv(), which combine an \ref lapack_lu_decomposition and the according // \ref lapack_lu_substitution : \code namespace blaze { void gesv( int n, int nrhs, float* A, int lda, int* ipiv, float* B, int ldb, int* info ); void gesv( int n, int nrhs, double* A, int lda, int* ipiv, double* B, int ldb, int* info ); void gesv( int n, int nrhs, complex<float>* A, int lda, int* ipiv, complex<float>* B, int ldb, int* info ); void gesv( int n, int nrhs, complex<double>* A, int lda, int* ipiv, complex<double>* B, int ldb, int* info ); template< typename MT, bool SO, typename VT, bool TF > void gesv( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, int* ipiv ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void gesv( DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, int* ipiv ); } // namespace blaze \endcode // If the function exits successfully, the vector \a b or the matrix \a B contain the // solution(s) of the linear system of equations and \a A has been decomposed by means of an // \ref lapack_lu_decomposition. // // The functions fail if ... // // - ... the given system matrix is not a square matrix; // - ... the given system matrix is singular and not invertible. // // The first four functions report failure via the \c info argument, the last two functions throw // a \a std::invalid_argument exception in case of an error. // // // \n \subsection lapack_ldlt_linear_system_solver LDLT-based Linear System Solver // // The following functions provide an interface for the LAPACK functions \c ssysv(), \c dsysv(), // \c csysv(), and \c zsysv(), which combine an \ref lapack_ldlt_decomposition and the according // \ref lapack_ldlt_substitution : \code namespace blaze { void sysv( char uplo, int n, int nrhs, float* A, int lda, int* ipiv, float* B, int ldb, float* work, int lwork, int* info ); void sysv( char uplo, int n, int nrhs, double* A, int lda, int* ipiv, double* B, int ldb, double* work, int lwork, int* info ); void sysv( char uplo, int n, int nrhs, complex<float>* A, int lda, int* ipiv, complex<float>* B, int ldb, complex<float>* work, int lwork, int* info ); void sysv( char uplo, int n, int nrhs, complex<double>* A, int lda, int* ipiv, complex<double>* B, int ldb, complex<double>* work, int lwork, int* info ); template< typename MT, bool SO, typename VT, bool TF > void sysv( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, int* ipiv ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void sysv( DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo, int* ipiv ); } // namespace blaze \endcode // If the function exits successfully, the vector \a b or the matrix \a B contain the // solution(s) of the linear system of equations and \a A has been decomposed by means of an // \ref lapack_ldlt_decomposition. // // The functions fail if ... // // - ... the given system matrix is not a square matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - ... the sizes of the two given matrices do not match; // - ... the given system matrix is singular and not invertible. // // The first four functions report failure via the \c info argument, the last two functions throw // a \a std::invalid_argument exception in case of an error.
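// A compound call such as \c sysv() replaces the two-step \c sytrf()/\c sytrs() sequence shown in
// the substitution section (a sketch, assuming a symmetric indefinite system matrix):

\code
blaze::DynamicMatrix<double,blaze::columnMajor> A( 4UL, 4UL );  // The symmetric indefinite system matrix (assumed)
blaze::DynamicVector<double,blaze::columnVector> b( 4UL );      // The right-hand side vector
// ... Initialization of A and b

std::vector<int> ipiv( 4UL );
blaze::sysv( A, b, 'L', ipiv.data() );  // Decomposition and substitution in a single step
\endcode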
// // // \n \subsection lapack_ldlh_linear_system_solver LDLH-based Linear System Solver // // The following functions provide an interface for the LAPACK functions \c chesv() and \c zhesv(), // which combine an \ref lapack_ldlh_decomposition and the according \ref lapack_ldlh_substitution : \code namespace blaze { void hesv( char uplo, int n, int nrhs, complex<float>* A, int lda, int* ipiv, complex<float>* B, int ldb, complex<float>* work, int lwork, int* info ); void hesv( char uplo, int n, int nrhs, complex<double>* A, int lda, int* ipiv, complex<double>* B, int ldb, complex<double>* work, int lwork, int* info ); template< typename MT, bool SO, typename VT, bool TF > void hesv( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, int* ipiv ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void hesv( DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo, int* ipiv ); } // namespace blaze \endcode // If the function exits successfully, the vector \a b or the matrix \a B contain the // solution(s) of the linear system of equations and \a A has been decomposed by means of an // \ref lapack_ldlh_decomposition. // // The functions fail if ... // // - ... the given system matrix is not a square matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - ... the sizes of the two given matrices do not match; // - ... the given system matrix is singular and not invertible. // // The first two functions report failure via the \c info argument, the last two functions throw // a \a std::invalid_argument exception in case of an error. // // // \n \subsection lapack_llh_linear_system_solver Cholesky-based Linear System Solver // // The following functions provide an interface for the LAPACK functions \c sposv(), \c dposv(), // \c cposv(), and \c zposv(), which combine an \ref lapack_llh_decomposition and the according // \ref lapack_llh_substitution : \code namespace blaze { void posv( char uplo, int n, int nrhs, float* A, int lda, float* B, int ldb, int* info ); void posv( char uplo, int n, int nrhs, double* A, int lda, double* B, int ldb, int* info ); void posv( char uplo, int n, int nrhs, complex<float>* A, int lda, complex<float>* B, int ldb, int* info ); void posv( char uplo, int n, int nrhs, complex<double>* A, int lda, complex<double>* B, int ldb, int* info ); template< typename MT, bool SO, typename VT, bool TF > void posv( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void posv( DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo ); } // namespace blaze \endcode // If the function exits successfully, the vector \a b or the matrix \a B contain the // solution(s) of the linear system of equations and \a A has been decomposed by means of an // \ref lapack_llh_decomposition. // // The functions fail if ... // // - ... the given system matrix is not a square matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - ... the sizes of the two given matrices do not match; // - ... the given system matrix is singular and not invertible. // // The first four functions report failure via the \c info argument, the last two functions throw // a \a std::invalid_argument exception in case of an error.
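// Correspondingly, \c posv() performs the Cholesky decomposition and the substitution step in a
// single call (a sketch, assuming a positive definite system matrix):

\code
blaze::DynamicMatrix<double,blaze::columnMajor> A( 3UL, 3UL );  // The positive definite system matrix (assumed)
blaze::DynamicVector<double,blaze::columnVector> b( 3UL );      // The right-hand side vector
// ... Initialization of A and b

blaze::posv( A, b, 'L' );  // Cholesky decomposition plus substitution; b now contains the solution
\endcode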
// // // \n \subsection lapack_triangular_linear_system_solver Linear System Solver for Triangular Matrices // // The following functions provide an interface for the LAPACK functions \c strsv(), \c dtrsv(), // \c ctrsv(), and \c ztrsv(): \code namespace blaze { void trsv( char uplo, char trans, char diag, int n, const float* A, int lda, float* x, int incX ); void trsv( char uplo, char trans, char diag, int n, const double* A, int lda, double* x, int incX ); void trsv( char uplo, char trans, char diag, int n, const complex<float>* A, int lda, complex<float>* x, int incX ); void trsv( char uplo, char trans, char diag, int n, const complex<double>* A, int lda, complex<double>* x, int incX ); template< typename MT, bool SO, typename VT, bool TF > void trsv( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, char trans, char diag ); } // namespace blaze \endcode // If the function exits successfully, the vector \a b contains the solution of the linear system // of equations. // // The functions fail if ... // // - ... the given system matrix is not a square matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - ... the given \a trans argument is neither 'N' nor 'T' nor 'C'; // - ... the given \a diag argument is neither 'U' nor 'N'. // // The last function throws a \a std::invalid_argument exception in case of an error. Note that // none of the functions performs any test for singularity or near-singularity. Such tests // must be performed prior to calling this function! // // // \n Previous: \ref blas_functions &nbsp; &nbsp; Next: \ref configuration_files \n */ //************************************************************************************************* //**Configuration Files**************************************************************************** /*!\page configuration_files Configuration Files // // \tableofcontents // // // Sometimes it might be necessary to adapt \b Blaze to specific requirements. For this purpose // \b Blaze provides several configuration files in the <tt>./blaze/config/</tt> subdirectory, // which provide ample opportunity to customize internal settings, behavior, and thresholds. // This chapter explains the most important of these configuration files. // // // \n \section transpose_flag Default Vector Storage // <hr> // // The \b Blaze default is that all vectors are created as column vectors (if not specified // explicitly): \code blaze::StaticVector<double,3UL> x; // Creates a 3-dimensional static column vector \endcode // The header file <tt>./blaze/config/TransposeFlag.h</tt> allows the configuration of the default // vector storage (i.e. the default transpose flag of the vectors). Via the \c defaultTransposeFlag // value the default transpose flag for all vectors of the \b Blaze library can be specified: \code constexpr bool defaultTransposeFlag = columnVector; \endcode // Valid settings for the \c defaultTransposeFlag are blaze::rowVector and blaze::columnVector. // // // \n \section storage_order Default Matrix Storage // <hr> // // Matrices are by default created as row-major matrices: \code blaze::StaticMatrix<double,3UL,3UL> A; // Creates a 3x3 row-major matrix \endcode // The header file <tt>./blaze/config/StorageOrder.h</tt> allows the configuration of the default // matrix storage order. Via the \c defaultStorageOrder value the default storage order for all // matrices of the \b Blaze library can be specified.
\code constexpr bool defaultStorageOrder = rowMajor; \endcode // Valid settings for the \c defaultStorageOrder are blaze::rowMajor and blaze::columnMajor. // // // \n \section blas_mode BLAS Mode // <hr> // // In order to achieve maximum performance for multiplications with dense matrices, \b Blaze can // be configured to use a BLAS library. Via the following compilation switch in the configuration // file <tt>./blaze/config/BLAS.h</tt> BLAS can be enabled: \code #define BLAZE_BLAS_MODE 1 \endcode // In case the selected BLAS library provides parallel execution, the \c BLAZE_BLAS_IS_PARALLEL // switch should be activated to prevent \b Blaze from parallelizing on its own: \code #define BLAZE_BLAS_IS_PARALLEL 1 \endcode // In case no BLAS library is available, \b Blaze will still work and will not be reduced in // functionality, but performance may be limited. // // // \n \section cache_size Cache Size // <hr> // // The optimization of several \b Blaze compute kernels depends on the cache size of the target // architecture. By default, \b Blaze assumes a cache size of 3 MiByte. However, for optimal // speed the exact cache size of the system should be provided via the \c cacheSize value in the // <tt>./blaze/config/CacheSize.h</tt> configuration file: \code constexpr size_t cacheSize = 3145728UL; \endcode // \n \section vectorization Vectorization // <hr> // // In order to achieve maximum performance and to exploit the compute power of a target platform // the \b Blaze library attempts to vectorize all linear algebra operations by SSE, AVX, and/or // MIC intrinsics, depending on which instruction set is available. However, it is possible to // disable the vectorization entirely by the compile time switch in the configuration file // <tt>./blaze/config/Vectorization.h</tt>: \code #define BLAZE_USE_VECTORIZATION 1 \endcode // In case the switch is set to 1, vectorization is enabled and the \b Blaze library is allowed // to use intrinsics to speed up computations. In case the switch is set to 0, vectorization is // disabled entirely and the \b Blaze library chooses default, non-vectorized functionality for // the operations. Note that deactivating the vectorization may pose a severe performance // limitation for a large number of operations! // // // \n \section thresholds Thresholds // <hr> // // For many computations \b Blaze distinguishes between small and large vectors and matrices. // This separation is especially important for the parallel execution of computations, since // the use of several threads only pays off for sufficiently large vectors and matrices. // Additionally, it also enables \b Blaze to select kernels that are optimized for a specific // size. // // In order to distinguish between small and large data structures \b Blaze provides several // thresholds that can be adapted to the characteristics of the target platform. For instance, // the \c DMATDVECMULT_THRESHOLD specifies the threshold between the application of the custom // \b Blaze kernels for small dense matrix/dense vector multiplications and the BLAS kernels // for large multiplications. All thresholds, including the thresholds for the OpenMP- and // thread-based parallelization, are contained within the configuration file // <tt>./blaze/config/Thresholds.h</tt>. // // // \n \section padding Padding // <hr> // // By default the \b Blaze library uses padding for all dense vectors and matrices in order to // achieve maximum performance in all operations. 
Due to padding, the proper alignment of data // elements can be guaranteed and the need for remainder loops is minimized. However, on the // downside padding introduces an additional memory overhead, which can be large depending on // the used data type. // // The configuration file <tt>./blaze/config/Optimizations.h</tt> provides a compile time switch // that can be used to (de-)activate padding: \code constexpr bool usePadding = true; \endcode // If \c usePadding is set to \c true padding is enabled for all dense vectors and matrices, if // it is set to \c false padding is disabled. Note however that disabling padding can considerably // reduce the performance of all dense vector and matrix operations! // // // \n \section streaming Streaming (Non-Temporal Stores) // <hr> // // For vectors and matrices that don't fit into the cache anymore non-temporal stores can provide // a significant performance advantage of about 20%. However, this advantage is only in effect in // case the memory bandwidth of the target architecture is maxed out. If the target architecture's // memory bandwidth cannot be exhausted the use of non-temporal stores can decrease performance // instead of increasing it. // // The configuration file <tt>./blaze/config/Optimizations.h</tt> provides a compile time switch // that can be used to (de-)activate streaming: \code constexpr bool useStreaming = true; \endcode // If \c useStreaming is set to \c true streaming is enabled, if it is set to \c false streaming // is disabled. It is recommended to consult the target architecture's white papers to decide // whether streaming is beneficial or hurtful for performance. // // // \n Previous: \ref lapack_functions &nbsp; &nbsp; Next: \ref block_vectors_and_matrices \n */ //************************************************************************************************* //**Block Vectors and Matrices********************************************************************* /*!\page block_vectors_and_matrices Block Vectors and Matrices // // \tableofcontents // // // \n \section block_vectors_and_matrices_general General Concepts // <hr> // // In addition to fundamental element types, the \b Blaze library supports vectors and matrices // with non-fundamental element type. For instance, it is possible to define block matrices by // using a matrix type as the element type: \code using blaze::DynamicMatrix; using blaze::DynamicVector; using blaze::rowMajor; using blaze::columnVector; DynamicMatrix< DynamicMatrix<double,rowMajor>, rowMajor > A; DynamicVector< DynamicVector<double,columnVector >, columnVector > x, y; // ... Resizing and initialization y = A * x; \endcode // The matrix/vector multiplication in this example runs fully parallel and uses vectorization // for every inner matrix/vector multiplication and vector addition. // // // \n \section block_vectors_and_matrices_pitfalls Pitfalls // <hr> // // The only thing to keep in mind when using non-fundamental element types is that all operations // between the elements have to be well defined. More specifically, the size of vector and matrix // elements has to match. 
The attempt to combine two non-matching elements results in either a // compilation error (in case of statically sized elements) or an exception (for dynamically sized // elements): \code DynamicVector< StaticVector<int,2UL> > a; DynamicVector< StaticVector<int,3UL> > b; DynamicVector< DynamicVector<int> > c( a + b ); // Compilation error: element size doesn't match \endcode // Therefore please don't forget that dynamically sized elements (e.g. \c blaze::DynamicVector, // \c blaze::HybridVector, \c blaze::DynamicMatrix, \c blaze::HybridMatrix, ...) need to be sized // accordingly upfront. // // // \n \section block_vectors_and_matrices_example Example // <hr> // // The following example demonstrates a complete multiplication between a statically sized block // matrix and block vector: \code // ( ( 1 1 ) ( 2 2 ) ) ( ( 1 ) ) ( ( 10 ) ) // ( ( 1 1 ) ( 2 2 ) ) ( ( 1 ) ) ( ( 10 ) ) // ( ) * ( ) = ( ) // ( ( 3 3 ) ( 4 4 ) ) ( ( 2 ) ) ( ( 22 ) ) // ( ( 3 3 ) ( 4 4 ) ) ( ( 2 ) ) ( ( 22 ) ) typedef StaticMatrix<int,2UL,2UL,rowMajor> M2x2; typedef StaticVector<int,2UL,columnVector> V2; DynamicMatrix<M2x2,rowMajor> A{ { M2x2(1), M2x2(2) }, { M2x2(3), M2x2(4) } }; DynamicVector<V2,columnVector> x{ V2(1), V2(2) }; DynamicVector<V2,columnVector> y( A * x ); \endcode // \n Previous: \ref configuration_files &nbsp; &nbsp; Next: \ref custom_data_types \n */ //************************************************************************************************* //**Custom Data Types****************************************************************************** /*!\page custom_data_types Custom Data Types // // // The \b Blaze library tries hard to make the use of custom data types as convenient, easy and // intuitive as possible. However, unfortunately it is not possible to meet the requirements of // all possible data types. Thus it might be necessary to provide \b Blaze with some additional // information about the data type. The following sections give an overview of the necessary steps // to enable the use of the hypothetical custom data type \c custom::double_t for vector and // matrix operations. For example: \code blaze::DynamicVector<custom::double_t> a, b, c; // ... Resizing and initialization c = a + b; \endcode // The \b Blaze library assumes that the \c custom::double_t data type provides \c operator+() // for additions, \c operator-() for subtractions, \c operator*() for multiplications and // \c operator/() for divisions. If any of these functions is missing it is necessary to implement // the operator to perform the according operation.
For this example we assume that the custom // data type provides the following four functions instead of operators: \code namespace custom { double_t add ( const double_t& a, const double_t& b ); double_t sub ( const double_t& a, const double_t& b ); double_t mult( const double_t& a, const double_t& b ); double_t div ( const double_t& a, const double_t& b ); } // namespace custom \endcode // The following implementations will satisfy the requirements of the \b Blaze library: \code inline custom::double_t operator+( const custom::double_t& a, const custom::double_t& b ) { return add( a, b ); } inline custom::double_t operator-( const custom::double_t& a, const custom::double_t& b ) { return sub( a, b ); } inline custom::double_t operator*( const custom::double_t& a, const custom::double_t& b ) { return mult( a, b ); } inline custom::double_t operator/( const custom::double_t& a, const custom::double_t& b ) { return div( a, b ); } \endcode // \b Blaze will use all the information provided with these functions (for instance the return // type) to properly handle the operations. In the rare case that the return type cannot be // automatically determined from the operator it might be additionally necessary to provide a // specialization of the following four \b Blaze class templates: \code namespace blaze { template<> struct AddTrait<custom::double_t,custom::double_t> { typedef custom::double_t Type; }; template<> struct SubTrait<custom::double_t,custom::double_t> { typedef custom::double_t Type; }; template<> struct MultTrait<custom::double_t,custom::double_t> { typedef custom::double_t Type; }; template<> struct DivTrait<custom::double_t,custom::double_t> { typedef custom::double_t Type; }; } // namespace blaze \endcode // The same steps are necessary if several custom data types need to be combined (as for instance // \c custom::double_t and \c custom::float_t). Note that in this case both permutations need to // be taken into account: \code custom::double_t operator+( const custom::double_t& a, const custom::float_t& b ); custom::double_t operator+( const custom::float_t& a, const custom::double_t& b ); // ... \endcode // Please note that only built-in data types qualify for vectorization and thus custom data types // cannot achieve maximum performance! // // // \n Previous: \ref block_vectors_and_matrices &nbsp; &nbsp; Next: \ref error_reporting_customization \n */ //************************************************************************************************* //**Customization of the Error Reporting Mechanism************************************************* /*!\page error_reporting_customization Customization of the Error Reporting Mechanism // // \tableofcontents // // // \n \section error_reporting_background Background // <hr> // // The default way of \b Blaze to report errors of any kind is to throw a standard exception. // However, although in general this approach works well, in certain environments and under // special circumstances exceptions may not be the mechanism of choice and a different error // reporting mechanism may be desirable. For this reason, \b Blaze provides several macros, // which enable the customization of the error reporting mechanism. Via these macros it is // possible to replace the standard exceptions by some other exception type or a completely // different approach to report errors.
// // // \n \section error_reporting_general_customization Customization of the Reporting Mechanism // <hr> // // In some cases it might be necessary to adapt the entire error reporting mechanism and to // replace it by some other means to signal failure. The primary macro for this purpose is the // \c BLAZE_THROW macro: \code #define BLAZE_THROW( EXCEPTION ) \ throw EXCEPTION \endcode // This macro represents the default mechanism of the \b Blaze library to report errors of any // kind. In order to customize the error reporting mechanism all that needs to be done is to // define the macro prior to including any \b Blaze header file. This will cause the \b Blaze // specific mechanism to be overridden. The following example demonstrates this by replacing // exceptions by a call to a \c log() function and a direct call to abort: \code #define BLAZE_THROW( EXCEPTION ) \ log( "..." ); \ abort() #include <blaze/Blaze.h> \endcode // Doing this will trigger a call to \c log() and an abort instead of throwing an exception // whenever an error (such as an invalid argument) is detected. // // \note It is possible to execute several statements instead of executing a single statement to // throw an exception. Also note that it is recommended to define the macro such that a subsequent // semicolon is required! // // \warning This macro is provided with the intention to assist in adapting \b Blaze to special // conditions and environments. However, the customization of the error reporting mechanism via // this macro can have a significant effect on the library. Thus be advised to use the macro // with due care! // // // \n \section error_reporting_exception_customization Customization of the Type of Exceptions // <hr> // // In addition to the customization of the entire error reporting mechanism it is also possible // to customize the type of exceptions being thrown. This can be achieved by customizing any // number of the following macros: \code #define BLAZE_THROW_BAD_ALLOC \ BLAZE_THROW( std::bad_alloc() ) #define BLAZE_THROW_LOGIC_ERROR( MESSAGE ) \ BLAZE_THROW( std::logic_error( MESSAGE ) ) #define BLAZE_THROW_INVALID_ARGUMENT( MESSAGE ) \ BLAZE_THROW( std::invalid_argument( MESSAGE ) ) #define BLAZE_THROW_LENGTH_ERROR( MESSAGE ) \ BLAZE_THROW( std::length_error( MESSAGE ) ) #define BLAZE_THROW_OUT_OF_RANGE( MESSAGE ) \ BLAZE_THROW( std::out_of_range( MESSAGE ) ) #define BLAZE_THROW_RUNTIME_ERROR( MESSAGE ) \ BLAZE_THROW( std::runtime_error( MESSAGE ) ) \endcode // In order to customize the type of exception the according macro has to be defined prior to // including any \b Blaze header file. This will override the \b Blaze default behavior. The // following example demonstrates this by replacing \c std::invalid_argument by a custom // exception type: \code class InvalidArgument { public: InvalidArgument(); explicit InvalidArgument( const std::string& message ); // ... }; #define BLAZE_THROW_INVALID_ARGUMENT( MESSAGE ) \ BLAZE_THROW( InvalidArgument( MESSAGE ) ) #include <blaze/Blaze.h> \endcode // By manually defining the macro, an \c InvalidArgument exception is thrown instead of a // \c std::invalid_argument exception. Note that it is recommended to define the macro such // that a subsequent semicolon is required! // // \warning These macros are provided with the intention to assist in adapting \b Blaze to // special conditions and environments. However, the customization of the type of an exception // via this macro may have an effect on the library.
Thus be advised to use the macro with due // care! // // // \n \section error_reporting_special_errors Customization of Special Errors // <hr> // // Last but not least it is possible to customize the error reporting for special kinds of errors. // This can be achieved by customizing any number of the following macros: \code #define BLAZE_THROW_DIVISION_BY_ZERO( MESSAGE ) \ BLAZE_THROW_RUNTIME_ERROR( MESSAGE ) #define BLAZE_THROW_LAPACK_ERROR( MESSAGE ) \ BLAZE_THROW_RUNTIME_ERROR( MESSAGE ) \endcode // As explained in the previous sections, in order to customize the handling of special errors // the according macro has to be defined prior to including any \b Blaze header file. This will // override the \b Blaze default behavior. // // // \n Previous: \ref custom_data_types &nbsp; &nbsp; Next: \ref intra_statement_optimization \n */ //************************************************************************************************* //**Intra-Statement Optimization******************************************************************* /*!\page intra_statement_optimization Intra-Statement Optimization // // One of the prime features of the \b Blaze library is the automatic intra-statement optimization. // In order to optimize the overall performance of every single statement \b Blaze attempts to // rearrange the operands based on their types. For instance, the following addition of dense and // sparse vectors \code blaze::DynamicVector<double> d1, d2, d3; blaze::CompressedVector<double> s1; // ... Resizing and initialization d3 = d1 + s1 + d2; \endcode // is automatically rearranged and evaluated as \code // ... d3 = d1 + d2 + s1; // <- Note that s1 and d2 have been rearranged \endcode // This order of operands is highly favorable for the overall performance since the addition of // the two dense vectors \c d1 and \c d2 can be handled much more efficiently in a vectorized // fashion. // // This intra-statement optimization can have a tremendous effect on the performance of a statement. // Consider for instance the following computation: \code blaze::DynamicMatrix<double> A, B; blaze::DynamicVector<double> x, y; // ... Resizing and initialization y = A * B * x; \endcode // Since multiplications are evaluated from left to right, this statement would result in a // matrix/matrix multiplication, followed by a matrix/vector multiplication. However, if the // right subexpression is evaluated first, the performance can be dramatically improved since the // matrix/matrix multiplication can be avoided in favor of a second matrix/vector multiplication. // The \b Blaze library exploits this by automatically restructuring the expression such that the // right multiplication is evaluated first: \code // ... y = A * ( B * x ); \endcode // Note however that although this intra-statement optimization may result in a measurable or // even significant performance improvement, this behavior may be undesirable for several reasons, // for instance because of numerical stability. Therefore, in case the order of evaluation matters, // the best solution is to be explicit and to separate a statement into several statements: \code blaze::DynamicVector<double> d1, d2, d3; blaze::CompressedVector<double> s1; // ... Resizing and initialization d3 = d1 + s1; // Compute the dense vector/sparse vector addition first ... d3 += d2; // ... and afterwards add the second dense vector \endcode \code // ... blaze::DynamicMatrix<double> A, B, C; blaze::DynamicVector<double> x, y; // ... 
Resizing and initialization C = A * B; // Compute the left-hand side matrix-matrix multiplication first ... y = C * x; // ... before the right-hand side matrix-vector multiplication \endcode // Alternatively, it is also possible to use the \c eval() function to fix the order of evaluation: \code blaze::DynamicVector<double> d1, d2, d3; blaze::CompressedVector<double> s1; // ... Resizing and initialization d3 = d1 + eval( s1 + d2 ); \endcode \code blaze::DynamicMatrix<double> A, B; blaze::DynamicVector<double> x, y; // ... Resizing and initialization y = eval( A * B ) * x; \endcode // \n Previous: \ref error_reporting_customization */ //************************************************************************************************* #endif
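The override mechanism described above can be reproduced outside of Blaze in a few lines. The following stand-alone sketch uses the same guard-the-default pattern; the MY_LIB_* names are illustrative stand-ins, not Blaze's, and the sketch merely assumes that Blaze's headers guard their defaults equivalently (which is what makes pre-defining the macros effective):

#include <cstdio>
#include <stdexcept>

// Default error-reporting macros, guarded so that a user definition made
// before this point takes precedence -- the override pattern described above.
#ifndef MY_LIB_THROW
#define MY_LIB_THROW( EXCEPTION ) throw EXCEPTION
#endif

#ifndef MY_LIB_THROW_INVALID_ARGUMENT
#define MY_LIB_THROW_INVALID_ARGUMENT( MESSAGE ) \
   MY_LIB_THROW( std::invalid_argument( MESSAGE ) )
#endif

// A "library" function that reports a detected error through the macro.
void checkedResize( int newSize )
{
   if( newSize < 0 ) {
      MY_LIB_THROW_INVALID_ARGUMENT( "Negative size" );
   }
   // ... perform the resize
}

int main()
{
   try {
      checkedResize( -1 );
   }
   catch( const std::invalid_argument& ex ) {
      std::printf( "Caught: %s\n", ex.what() );
   }
   return 0;
}

Defining MY_LIB_THROW as a log-and-abort sequence before the two #ifndef blocks would reroute all error reporting without touching the library code, exactly as in the BLAZE_THROW example above.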
LinkedCellsReferences.h
/**
 * @file LinkedCellsReferences.h
 * @date 05.04.2020
 * @author lunaticcoding
 */

#pragma once

#include "autopas/cells/ReferenceParticleCell.h"
#include "autopas/containers/CellBasedParticleContainer.h"
#include "autopas/containers/CellBlock3D.h"
#include "autopas/containers/CompatibleTraversals.h"
#include "autopas/containers/LoadEstimators.h"
#include "autopas/containers/cellPairTraversals/BalancedTraversal.h"
#include "autopas/containers/linkedCells/ParticleVector.h"
#include "autopas/iterators/ParticleIterator.h"
#include "autopas/iterators/RegionParticleIterator.h"
#include "autopas/options/DataLayoutOption.h"
#include "autopas/options/LoadEstimatorOption.h"
#include "autopas/particles/OwnershipState.h"
#include "autopas/utils/ArrayMath.h"
#include "autopas/utils/ParticleCellHelpers.h"
#include "autopas/utils/StringUtils.h"
#include "autopas/utils/WrapOpenMP.h"
#include "autopas/utils/inBox.h"

namespace autopas {

/**
 * LinkedCellsReferences class.
 * This class uses a list of neighboring cells to store the particles.
 * These cells' dimensions are at least as large as the given cutoff radius,
 * therefore short-range interactions only need to be calculated between
 * particles in neighboring cells.
 * @tparam Particle type of the particles stored in this container
 */
template <class Particle>
class LinkedCellsReferences : public CellBasedParticleContainer<ReferenceParticleCell<Particle>> {
 public:
  /**
   * Type of the Particle.
   */
  using ParticleType = Particle;

  /**
   * Type of the ParticleCell.
   */
  using ReferenceCell = ReferenceParticleCell<Particle>;

  /**
   * Constructor of the LinkedCellsReferences class
   * @param boxMin
   * @param boxMax
   * @param cutoff
   * @param skin
   * @param cellSizeFactor cell size factor relative to cutoff
   * @param loadEstimator the load estimation algorithm for balanced traversals.
*/ LinkedCellsReferences(const std::array<double, 3> boxMin, const std::array<double, 3> boxMax, const double cutoff, const double skin, const double cellSizeFactor = 1.0, LoadEstimatorOption loadEstimator = LoadEstimatorOption::squaredParticlesPerCell) : CellBasedParticleContainer<ReferenceCell>(boxMin, boxMax, cutoff, skin), _cellBlock(this->_cells, boxMin, boxMax, cutoff + skin, cellSizeFactor), _loadEstimator(loadEstimator) {} /** * @copydoc ParticleContainerInterface::getContainerType() */ [[nodiscard]] ContainerOption getContainerType() const override { return ContainerOption::linkedCellsReferences; } /** * @copydoc ParticleContainerInterface::getParticleCellTypeEnum() */ [[nodiscard]] CellType getParticleCellTypeEnum() override { return CellType::ReferenceParticleCell; } /** * @copydoc ParticleContainerInterface::addParticleImpl() */ void addParticleImpl(const ParticleType &p) override { ParticleType pCopy = p; addParticleLock.lock(); _particleList.push_back(pCopy); updateDirtyParticleReferences(); addParticleLock.unlock(); } /** * @copydoc ParticleContainerInterface::addHaloParticleImpl() */ void addHaloParticleImpl(const ParticleType &haloParticle) override { ParticleType pCopy = haloParticle; pCopy.setOwnershipState(OwnershipState::halo); addParticleLock.lock(); _particleList.push_back(pCopy); updateDirtyParticleReferences(); addParticleLock.unlock(); } /** * @copydoc ParticleContainerInterface::updateHaloParticle() */ bool updateHaloParticle(const ParticleType &haloParticle) override { ParticleType pCopy = haloParticle; pCopy.setOwnershipState(OwnershipState::halo); auto cells = _cellBlock.getNearbyHaloCells(pCopy.getR(), this->getSkin()); for (auto cellptr : cells) { bool updated = internal::checkParticleInCellAndUpdateByID(*cellptr, pCopy); if (updated) { return true; } } AutoPasLog(trace, "UpdateHaloParticle was not able to update particle: {}", pCopy.toString()); return false; } /** * @copydoc ParticleContainerInterface::deleteHaloParticles() */ void deleteHaloParticles() override { _particleList.clearHaloParticles(); _cellBlock.clearHaloCells(); } /** * Generates the load estimation function depending on _loadEstimator. * @return load estimator function object. */ BalancedTraversal::EstimatorFunction getLoadEstimatorFunction() { switch (this->_loadEstimator) { case LoadEstimatorOption::squaredParticlesPerCell: { return [&](const std::array<unsigned long, 3> &cellsPerDimension, const std::array<unsigned long, 3> &lowerCorner, const std::array<unsigned long, 3> &upperCorner) { return loadEstimators::squaredParticlesPerCell(this->_cells, cellsPerDimension, lowerCorner, upperCorner); }; } case LoadEstimatorOption::none: [[fallthrough]]; default: { return [&](const std::array<unsigned long, 3> &cellsPerDimension, const std::array<unsigned long, 3> &lowerCorner, const std::array<unsigned long, 3> &upperCorner) { return 1; }; } } } /** * @copydoc ParticleContainerInterface::rebuildNeighborLists() */ void rebuildNeighborLists(TraversalInterface *traversal) override { updateDirtyParticleReferences(); } /** * Updates all the References in the cells that are out of date. * @note Removes all dummy particles. 
*/ void updateDirtyParticleReferences() { if (_particleList.isDirty()) { if (_particleList.needsRebuild()) { for (auto &cell : this->_cells) { cell.clear(); } } for (auto it = _particleList.beginDirty(); it < _particleList.endDirty(); it++) { if (it->isDummy()) { continue; } ReferenceCell &cell = _cellBlock.getContainingCell(it->getR()); auto address = &(*it); cell.addParticleReference(address); } _particleList.markAsClean(); } } void iteratePairwise(TraversalInterface *traversal) override { // Check if traversal is allowed for this container and give it the data it needs. auto *traversalInterface = dynamic_cast<LCTraversalInterface<ReferenceCell> *>(traversal); auto *cellPairTraversal = dynamic_cast<CellPairTraversal<ReferenceCell> *>(traversal); if (auto *balancedTraversal = dynamic_cast<BalancedTraversal *>(traversal)) { balancedTraversal->setLoadEstimator(getLoadEstimatorFunction()); } if (traversalInterface && cellPairTraversal) { cellPairTraversal->setCellsToTraverse(this->_cells); } else { autopas::utils::ExceptionHandler::exception( "Trying to use a traversal of wrong type in LinkedCellsReferences::iteratePairwise. TraversalID: {}", traversal->getTraversalType()); } traversal->initTraversal(); traversal->traverseParticlePairs(); traversal->endTraversal(); } std::vector<ParticleType> updateContainer() override { this->deleteHaloParticles(); std::vector<ParticleType> invalidParticles; #ifdef AUTOPAS_OPENMP #pragma omp parallel #endif // AUTOPAS_OPENMP { // private for each thread! std::vector<ParticleType> myInvalidParticles, myInvalidNotOwnedParticles; #ifdef AUTOPAS_OPENMP #pragma omp for #endif // AUTOPAS_OPENMP for (size_t cellId = 0; cellId < this->getCells().size(); ++cellId) { // Delete dummy particles of each cell. this->getCells()[cellId].deleteDummyParticles(); // if empty if (not this->getCells()[cellId].isNotEmpty()) continue; auto [cellLowerCorner, cellUpperCorner] = this->getCellBlock().getCellBoundingBox(cellId); for (auto &&pIter = this->getCells()[cellId].begin(); pIter.isValid(); ++pIter) { // if not in cell if (utils::notInBox(pIter->getR(), cellLowerCorner, cellUpperCorner)) { myInvalidParticles.push_back(*pIter); internal::deleteParticle(pIter); } } } // implicit barrier here // the barrier is needed because iterators are not threadsafe w.r.t. addParticle() // this loop is executed for every thread and thus parallel. Don't use #pragma omp for here! for (auto &&p : myInvalidParticles) { // if not in halo if (utils::inBox(p.getR(), this->getBoxMin(), this->getBoxMax())) { this->template addParticle<false>(p); } else { myInvalidNotOwnedParticles.push_back(p); } } #ifdef AUTOPAS_OPENMP #pragma omp critical #endif { // merge private vectors to global one. 
invalidParticles.insert(invalidParticles.end(), myInvalidNotOwnedParticles.begin(), myInvalidNotOwnedParticles.end()); } } _particleList.deleteDummyParticles(); updateDirtyParticleReferences(); return invalidParticles; } /** * @copydoc ParticleContainerInterface::getTraversalSelectorInfo() */ [[nodiscard]] TraversalSelectorInfo getTraversalSelectorInfo() const override { return TraversalSelectorInfo(this->getCellBlock().getCellsPerDimensionWithHalo(), this->getInteractionLength(), this->getCellBlock().getCellLength(), 0); } ParticleIteratorWrapper<ParticleType, true> begin( IteratorBehavior behavior = autopas::IteratorBehavior::ownedOrHalo) override { return ParticleIteratorWrapper<ParticleType, true>( new internal::ParticleIterator<ParticleType, ReferenceCell, true>(&this->_cells, 0, &_cellBlock, behavior, nullptr)); } ParticleIteratorWrapper<ParticleType, false> begin( IteratorBehavior behavior = autopas::IteratorBehavior::ownedOrHalo) const override { return ParticleIteratorWrapper<ParticleType, false>( new internal::ParticleIterator<ParticleType, ReferenceCell, false>(&this->_cells, 0, &_cellBlock, behavior, nullptr)); } ParticleIteratorWrapper<ParticleType, true> getRegionIterator(const std::array<double, 3> &lowerCorner, const std::array<double, 3> &higherCorner, IteratorBehavior behavior) override { // We increase the search region by skin, as particles can move over cell borders. auto startIndex3D = this->_cellBlock.get3DIndexOfPosition(utils::ArrayMath::subScalar(lowerCorner, this->getSkin())); auto stopIndex3D = this->_cellBlock.get3DIndexOfPosition(utils::ArrayMath::addScalar(higherCorner, this->getSkin())); size_t numCellsOfInterest = (stopIndex3D[0] - startIndex3D[0] + 1) * (stopIndex3D[1] - startIndex3D[1] + 1) * (stopIndex3D[2] - startIndex3D[2] + 1); std::vector<size_t> cellsOfInterest(numCellsOfInterest); int i = 0; for (size_t z = startIndex3D[2]; z <= stopIndex3D[2]; ++z) { for (size_t y = startIndex3D[1]; y <= stopIndex3D[1]; ++y) { for (size_t x = startIndex3D[0]; x <= stopIndex3D[0]; ++x) { cellsOfInterest[i++] = utils::ThreeDimensionalMapping::threeToOneD({x, y, z}, this->_cellBlock.getCellsPerDimensionWithHalo()); } } } return ParticleIteratorWrapper<ParticleType, true>( new internal::RegionParticleIterator<ParticleType, ReferenceCell, true>( &this->_cells, lowerCorner, higherCorner, cellsOfInterest, &_cellBlock, behavior, nullptr)); } ParticleIteratorWrapper<ParticleType, false> getRegionIterator(const std::array<double, 3> &lowerCorner, const std::array<double, 3> &higherCorner, IteratorBehavior behavior) const override { // We increase the search region by skin, as particles can move over cell borders. 
auto startIndex3D = this->_cellBlock.get3DIndexOfPosition(utils::ArrayMath::subScalar(lowerCorner, this->getSkin())); auto stopIndex3D = this->_cellBlock.get3DIndexOfPosition(utils::ArrayMath::addScalar(higherCorner, this->getSkin())); size_t numCellsOfInterest = (stopIndex3D[0] - startIndex3D[0] + 1) * (stopIndex3D[1] - startIndex3D[1] + 1) * (stopIndex3D[2] - startIndex3D[2] + 1); std::vector<size_t> cellsOfInterest(numCellsOfInterest); int i = 0; for (size_t z = startIndex3D[2]; z <= stopIndex3D[2]; ++z) { for (size_t y = startIndex3D[1]; y <= stopIndex3D[1]; ++y) { for (size_t x = startIndex3D[0]; x <= stopIndex3D[0]; ++x) { cellsOfInterest[i++] = utils::ThreeDimensionalMapping::threeToOneD({x, y, z}, this->_cellBlock.getCellsPerDimensionWithHalo()); } } } return ParticleIteratorWrapper<ParticleType, false>( new internal::RegionParticleIterator<ParticleType, ReferenceCell, false>( &this->_cells, lowerCorner, higherCorner, cellsOfInterest, &_cellBlock, behavior, nullptr)); } /** * Get the cell block, not supposed to be used except by verlet lists * @return the cell block */ internal::CellBlock3D<ReferenceCell> &getCellBlock() { return _cellBlock; } /** * @copydoc getCellBlock() * @note const version */ const internal::CellBlock3D<ReferenceCell> &getCellBlock() const { return _cellBlock; } /** * Returns reference to the data of LinkedCellsReferences * @return the data */ std::vector<ReferenceCell> &getCells() { return this->_cells; } /** * @copydoc getCells() * @note const version */ const std::vector<ReferenceCell> &getCells() const { return this->_cells; } protected: /** * object that stores the actual Particles and keeps track of the references. */ ParticleVector<ParticleType> _particleList; /** * object to manage the block of cells. */ internal::CellBlock3D<ReferenceCell> _cellBlock; /** * load estimation algorithm for balanced traversals. */ autopas::LoadEstimatorOption _loadEstimator; /** * Workaround for adding particles in parallel -> https://github.com/AutoPas/AutoPas/issues/555 */ AutoPasLock addParticleLock; }; } // namespace autopas
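Both getRegionIterator() overloads above enumerate candidate cells by flattening 3D cell coordinates with utils::ThreeDimensionalMapping::threeToOneD(). The following stand-alone sketch reproduces that enumeration assuming an x-fastest flattening, which matches the innermost x loop above; the real AutoPas mapping may order dimensions differently:

#include <array>
#include <cstddef>
#include <vector>

// Flatten (x,y,z) with x varying fastest.
inline std::size_t threeToOneD(const std::array<std::size_t, 3> &c,
                               const std::array<std::size_t, 3> &dims) {
  return (c[2] * dims[1] + c[1]) * dims[0] + c[0];
}

// Collect the flat indices of all cells in the inclusive box [lo, hi].
std::vector<std::size_t> cellsInBox(const std::array<std::size_t, 3> &lo,
                                    const std::array<std::size_t, 3> &hi,
                                    const std::array<std::size_t, 3> &dims) {
  std::vector<std::size_t> cells;
  cells.reserve((hi[0] - lo[0] + 1) * (hi[1] - lo[1] + 1) * (hi[2] - lo[2] + 1));
  for (std::size_t z = lo[2]; z <= hi[2]; ++z)
    for (std::size_t y = lo[1]; y <= hi[1]; ++y)
      for (std::size_t x = lo[0]; x <= hi[0]; ++x)
        cells.push_back(threeToOneD({x, y, z}, dims));
  return cells;
}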
GB_unaryop__identity_int16_fp32.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__identity_int16_fp32 // op(A') function: GB_tran__identity_int16_fp32 // C type: int16_t // A type: float // cast: int16_t cij ; GB_CAST_SIGNED(cij,aij,16) // unaryop: cij = aij #define GB_ATYPE \ float #define GB_CTYPE \ int16_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ float aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CASTING(z, x) \ int16_t z ; GB_CAST_SIGNED(z,x,16) ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_INT16 || GxB_NO_FP32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__identity_int16_fp32 ( int16_t *restrict Cx, const float *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__identity_int16_fp32 ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
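Stripped of its macro layer, the generated apply kernel above is a single statically scheduled OpenMP loop. A stand-alone sketch follows; the plain cast is a simplifying placeholder for GB_CAST_SIGNED, whose exact NaN and out-of-range handling is defined elsewhere in GraphBLAS and is not reproduced here:

#include <stdint.h>

// Stand-alone equivalent of the generated kernel: Cx [p] = (int16_t) Ax [p].
static void apply_identity_int16_fp32(int16_t *Cx, const float *Ax,
                                      int64_t anz, int nthreads)
{
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        Cx [p] = (int16_t) Ax [p] ;
    }
}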
set_BCs.h
#pragma omp target teams distribute parallel for thread_limit(BLOCK_SIZE)
for (int ind = 1; ind < NUM+1; ind++) {
  // left boundary
  u(0, ind) = ZERO;
  v(0, ind) = -v(1, ind);

  // right boundary
  u(NUM, ind) = ZERO;
  v(NUM + 1, ind) = -v(NUM, ind);

  // bottom boundary
  u(ind, 0) = -u(ind, 1);
  v(ind, 0) = ZERO;

  // top boundary (moving lid: ghost = 2*u_lid - interior, with u_lid = ONE)
  u(ind, NUM + 1) = TWO - u(ind, NUM);
  v(ind, NUM) = ZERO;

  // corner ghost cells, handled once by the last iteration; the bottom/top
  // rules overwrite the left/right ones at the four corners
  if (ind == NUM) {
    // left boundary
    u(0, 0) = ZERO;
    v(0, 0) = -v(1, 0);
    u(0, NUM + 1) = ZERO;
    v(0, NUM + 1) = -v(1, NUM + 1);

    // right boundary
    u(NUM, 0) = ZERO;
    v(NUM + 1, 0) = -v(NUM, 0);
    u(NUM, NUM + 1) = ZERO;
    v(NUM + 1, NUM + 1) = -v(NUM, NUM + 1);

    // bottom boundary
    u(0, 0) = -u(0, 1);
    v(0, 0) = ZERO;
    u(NUM + 1, 0) = -u(NUM + 1, 1);
    v(NUM + 1, 0) = ZERO;

    // top boundary
    u(0, NUM + 1) = TWO - u(0, NUM);
    v(0, NUM) = ZERO;
    u(NUM + 1, NUM + 1) = TWO - u(NUM + 1, NUM);
    v(NUM + 1, NUM) = ZERO;
  } // end if
}
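Every ghost-cell update above is an instance of one reflection rule: choose the ghost value so that the average of the ghost and the first interior value equals the prescribed wall velocity, i.e. ghost = 2*u_wall - interior. The TWO - u(ind, NUM) lines correspond to a lid moving with velocity ONE; the stationary walls use u_wall = 0 and hence ghost = -interior. A generic helper making this explicit (illustrative, not part of the original header):

// ghost + interior == 2 * wall_value  =>  ghost = 2*wall_value - interior
static inline double ghost_value(double wall_value, double interior)
{
  return 2.0 * wall_value - interior;
}
// moving lid (u_wall = 1):   u(ind, NUM + 1) = ghost_value(1.0, u(ind, NUM));
// no-slip wall (u_wall = 0): v(0, ind)       = ghost_value(0.0, v(1, ind));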
GB_unop__identity_int32_int32.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__(none)) // op(A') function: GB (_unop_tran__identity_int32_int32) // C type: int32_t // A type: int32_t // cast: int32_t cij = aij // unaryop: cij = aij #define GB_ATYPE \ int32_t #define GB_CTYPE \ int32_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int32_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ int32_t z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ int32_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ int32_t z = aij ; \ Cx [pC] = z ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_INT32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ #if 0 GrB_Info GB (_unop_apply__(none)) ( int32_t *Cx, // Cx and Ax may be aliased const int32_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int32_t aij = Ax [p] ; int32_t z = aij ; Cx [p] = z ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; int32_t aij = Ax [p] ; int32_t z = aij ; Cx [p] = z ; } } return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__identity_int32_int32) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
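The (compiled-out) apply kernel above carries a second branch for bitmap storage: Ab marks which entries of A exist, absent entries are skipped, and the bitmap itself has already been copied into C->b by the caller. A stand-alone sketch of that branch:

#include <stdint.h>

// Bitmap apply: only entries with Ab [p] != 0 are present in the matrix.
static void apply_identity_int32_bitmap(int32_t *Cx, const int32_t *Ax,
                                        const int8_t *Ab, int64_t anz,
                                        int nthreads)
{
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!Ab [p]) continue ;
        Cx [p] = Ax [p] ;
    }
}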
3d7pt.lbpar.c
#include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))

/*
 * Order-1, 3D 7 point stencil
 * Adapted from PLUTO and Pochoir test bench
 *
 * Tareq Malas
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>

#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif

#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)

/* Subtract the `struct timeval' values X and Y,
 * storing the result in RESULT.
 *
 * Return 1 if the difference is negative, otherwise 0.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Perform the carry for the later subtraction by updating y. */
  if (x->tv_usec < y->tv_usec) {
    int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * nsec;
    y->tv_sec += nsec;
  }
  if (x->tv_usec - y->tv_usec > 1000000) {
    int nsec = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * nsec;
    y->tv_sec -= nsec;
  }

  /* Compute the time remaining to wait.
   * tv_usec is certainly positive.
   */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;

  /* Return 1 if result is negative. */
  return x->tv_sec < y->tv_sec;
}

int main(int argc, char *argv[])
{
  int t, i, j, k, test;
  /* fall-back problem size (arbitrary defaults) so the dimensions are
   * defined when command-line arguments are omitted */
  int Nx = 34, Ny = 34, Nz = 34, Nt = 10;
  if (argc > 3) {
    Nx = atoi(argv[1])+2;
    Ny = atoi(argv[2])+2;
    Nz = atoi(argv[3])+2;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  double ****A = (double ****) malloc(sizeof(double***)*2);
  A[0] = (double ***) malloc(sizeof(double**)*Nz);
  A[1] = (double ***) malloc(sizeof(double**)*Nz);
  for(i=0; i<Nz; i++){
    A[0][i] = (double**) malloc(sizeof(double*)*Ny);
    A[1][i] = (double**) malloc(sizeof(double*)*Ny);
    for(j=0;j<Ny;j++){
      A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
      A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 4;
  tile_size[1] = 4;
  tile_size[2] = 16;
  tile_size[3] = 128;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;

  const int BASE = 1024;
  const double alpha = 0.0876;
  const double beta = 0.0765;

  // initialize variables
  // srand(42);
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);

    // serial execution - Addition: 6 && Multiplication: 2
    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lb, ub, lbp, ubp, lb2, ub2;
    register int lbv, ubv;

/* Start of CLooG code */
if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
  for (t1=-1;t1<=floord(Nt-2,2);t1++) {
    lbp=max(ceild(t1,2),ceild(4*t1-Nt+3,4));
    ubp=min(floord(Nt+Nz-4,4),floord(2*t1+Nz-1,4));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
    for (t2=lbp;t2<=ubp;t2++) {
      for (t3=max(max(0,ceild(t1-7,8)),ceild(4*t2-Nz-12,16));t3<=min(min(min(floord(4*t2+Ny,16),floord(Nt+Ny-4,16)),floord(2*t1+Ny+1,16)),floord(4*t1-4*t2+Nz+Ny-1,16));t3++) {
        for (t4=max(max(max(0,ceild(t1-63,64)),ceild(4*t2-Nz-124,128)),ceild(16*t3-Ny-124,128));t4<=min(min(min(min(floord(4*t2+Nx,128),floord(Nt+Nx-4,128)),floord(2*t1+Nx+1,128)),floord(16*t3+Nx+12,128)),floord(4*t1-4*t2+Nz+Nx-1,128));t4++) {
          for (t5=max(max(max(max(max(0,2*t1),4*t1-4*t2+1),4*t2-Nz+2),16*t3-Ny+2),128*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,2*t1+3),4*t2+2),16*t3+14),128*t4+126),4*t1-4*t2+Nz+1);t5++) {
            for (t6=max(max(4*t2,t5+1),-4*t1+4*t2+2*t5-3);t6<=min(min(4*t2+3,-4*t1+4*t2+2*t5),t5+Nz-2);t6++) {
              for (t7=max(16*t3,t5+1);t7<=min(16*t3+15,t5+Ny-2);t7++) {
                lbv=max(128*t4,t5+1);
                ubv=min(128*t4+127,t5+Nx-2);
#pragma ivdep
#pragma vector always
                for (t8=lbv;t8<=ubv;t8++) {
                  A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));
                }
              }
            }
          }
        }
      }
    }
  }
}
/* End of CLooG code */

    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = min(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(1, "constant")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays (causing performance degradation)
  /* for(i=0; i<Nz; i++){
       for(j=0;j<Ny;j++){
         free(A[0][i][j]);
         free(A[1][i][j]);
       }
       free(A[0][i]);
       free(A[1][i]);
     }
     free(A[0]);
     free(A[1]);
  */
  return 0;
}
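The time-tiled CLooG nest above is hard to read. The following untiled function computes the same update, reconstructed from the innermost statement and the loop bounds (t5 runs over time steps 0..Nt-2, and the spatial offsets -t5+t6 etc. cover the interior points 1..N-2); it is useful as a correctness reference against the tiled version:

// Untiled reference for the CLooG nest above: one Jacobi-style sweep of the
// order-1 7-point stencil per time step, double-buffered over A's leading index.
static void stencil_3d7pt_ref(double ****A, int Nt, int Nz, int Ny, int Nx,
                              double alpha, double beta)
{
  for (int t = 0; t < Nt - 1; t++)
    for (int i = 1; i < Nz - 1; i++)
      for (int j = 1; j < Ny - 1; j++)
        for (int k = 1; k < Nx - 1; k++)
          A[(t + 1) % 2][i][j][k] =
              alpha * A[t % 2][i][j][k] +
              beta  * (A[t % 2][i - 1][j][k] + A[t % 2][i + 1][j][k] +
                       A[t % 2][i][j - 1][k] + A[t % 2][i][j + 1][k] +
                       A[t % 2][i][j][k - 1] + A[t % 2][i][j][k + 1]);
}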
ex01.c
#include <stdio.h>
#include <omp.h>

int main(int argc, char *argv[])
{
  #pragma omp parallel
  {
    int id = omp_get_thread_num();
    printf ("Hello (%d) ", id);
    printf ("World (%d)!\n", id);
  }
  return 0;
}
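Because each thread executes the two printf calls independently, output from different threads can interleave between "Hello" and "World". A variant that keeps each greeting intact by emitting it in a single call (build with e.g. gcc -fopenmp):

#include <stdio.h>
#include <omp.h>

int main(void)
{
  #pragma omp parallel
  {
    int id = omp_get_thread_num();
    // One call per thread, so greetings from different threads
    // cannot interleave mid-line.
    printf("Hello World (%d)!\n", id);
  }
  return 0;
}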
CALPHADFreeEnergyFunctionsBinary3Ph2Sl.h
#ifndef included_CALPHADFreeEnergyFunctionsBinary3Ph2Sl #define included_CALPHADFreeEnergyFunctionsBinary3Ph2Sl #include "CALPHADSpeciesPhaseGibbsEnergy.h" #include "InterpolationType.h" #include "Phases.h" #include "datatypes.h" #include "functions.h" #include <boost/property_tree/ptree.hpp> #include <cassert> #include <fstream> #include <iostream> #include <math.h> namespace Thermo4PFM { class CALPHADFreeEnergyFunctionsBinary3Ph2Sl { public: CALPHADFreeEnergyFunctionsBinary3Ph2Sl( boost::property_tree::ptree& input_db, boost::optional<boost::property_tree::ptree&> newton_db, const EnergyInterpolationType energy_interp_func_type, const ConcInterpolationType conc_interp_func_type); ~CALPHADFreeEnergyFunctionsBinary3Ph2Sl(){}; double computeFreeEnergy(const double temperature, const double* const conc, const PhaseIndex pi, const bool gp = false); void computeDerivFreeEnergy(const double temperature, const double* const conc, const PhaseIndex pi, double*); void computeSecondDerivativeFreeEnergy(const double temp, const double* const conc, const PhaseIndex pi, double* d2fdc2); bool computeCeqT(const double temperature, double* ceq, const int maxits = 20, const bool verbose = false); void preRunDiagnostics(const double T0 = 300., const double T1 = 3000.); int computePhaseConcentrations(const double temperature, const double* conc, const double* const phi, double* x); void energyVsPhiAndC(const double temperature, const double* const ceq, const bool found_ceq, const double phi_well_scale, const int npts_phi = 51, const int npts_c = 50); // # of compositions to use (>1) void printEnergyVsComposition( const double temperature, std::ostream& os, const int npts = 100); double fchem(const double* const phi, const double* const conc, const double temperature); void printEnergyVsPhiHeader(const double temperature, const int nphi, const int nc, const double cmin, const double cmax, const double slopec, std::ostream& os) const; void printEnergyVsPhi(const double* const conc, const double temperature, const double phi_well_scale, const int npts, const double slopec, std::ostream& os); void computeTdependentParameters(const double temperature, CalphadDataType* Lmix_L, CalphadDataType* Lmix_A, CalphadDataType* Lmix_B, CalphadDataType* fA, CalphadDataType* fB); private: EnergyInterpolationType energy_interp_func_type_; ConcInterpolationType conc_interp_func_type_; void readNewtonparameters(boost::property_tree::ptree& newton_db); char* fenergy_diag_filename_; double newton_tol_; double newton_alpha_; int newton_maxits_; bool newton_verbose_; // Single species energies in each phase // size 2 for species 0 and 1 CALPHADSpeciesPhaseGibbsEnergy g_species_phaseL_[2]; CALPHADSpeciesPhaseGibbsEnergy g_species_phaseA_[2]; CALPHADSpeciesPhaseGibbsEnergy g_species_phaseB_[2]; // size 4 for L0, L1, L2, L3, // can contain up to 3 coefficients a,b,c for a+b*T, // possibly +c*T*ln(T) if compiled with -DLMIX_WTLOGT CalphadDataType LmixPhaseL_[4][MAX_POL_T_INDEX]; CalphadDataType LmixPhaseA_[4][MAX_POL_T_INDEX]; CalphadDataType LmixPhaseB_[4][MAX_POL_T_INDEX]; int sublattice_stoichiometry_phaseL_[2]; int sublattice_stoichiometry_phaseA_[2]; int sublattice_stoichiometry_phaseB_[2]; double (*fun_ptr_arr_[3])(const double){ linear_interp_func, pbg_interp_func, harmonic_interp_func }; void readParameters(boost::property_tree::ptree& calphad_db); #ifdef HAVE_OPENMP_OFFLOAD #pragma omp declare target #endif // energy of species "is" in phase L,A double getFenergyPhaseL(const short is, const double temperature) { return 
g_species_phaseL_[is].fenergy(temperature); } double getFenergyPhaseA(const short is, const double temperature) { return g_species_phaseA_[is].fenergy(temperature); } double getFenergyPhaseB(const short is, const double temperature) { return g_species_phaseB_[is].fenergy(temperature); } CalphadDataType lmixPhase( const unsigned index, const PhaseIndex pi, const double temperature) { // assert(index < 4); switch (pi) { case PhaseIndex::phaseL: return LmixPhaseL_[index][0] + LmixPhaseL_[index][1] * temperature #ifdef LMIX_WTLOGT + LmixPhaseL_[index][2] * temperature * log(temperature) #endif ; case PhaseIndex::phaseA: return LmixPhaseA_[index][0] + LmixPhaseA_[index][1] * temperature #ifdef LMIX_WTLOGT + LmixPhaseA_[index][2] * temperature * log(temperature) #endif ; case PhaseIndex::phaseB: return LmixPhaseB_[index][0] + LmixPhaseB_[index][1] * temperature #ifdef LMIX_WTLOGT + LmixPhaseB_[index][2] * temperature * log(temperature) #endif ; default: return NAN; } } #ifdef HAVE_OPENMP_OFFLOAD #pragma omp end declare target #endif void computePhasesFreeEnergies(const double temperature, const double* const hphi, const double conc, double& fl, double& fa, double& fb); }; } #endif
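The lmixPhase() accessor above evaluates temperature-dependent interaction coefficients L_k(T) = a + b*T (plus c*T*ln T when compiled with -DLMIX_WTLOGT). As an illustration of where such coefficients typically end up, the sketch below evaluates a Redlich-Kister excess energy for a simple substitutional binary solution; the 3Ph2Sl class above additionally tracks two-sublattice site fractions, so this is a simplified model, and the function names are illustrative:

#include <cmath>

// L_k(T) = a + b*T (+ c*T*ln T), matching lmixPhase(); coeff holds {a, b, c}.
double lmix(const double coeff[3], double T)
{
    double L = coeff[0] + coeff[1] * T;
#ifdef LMIX_WTLOGT
    L += coeff[2] * T * std::log(T);
#endif
    return L;
}

// Redlich-Kister excess energy for a substitutional binary phase with
// composition x of species B:
//   G_xs = x*(1-x) * sum_k L_k(T) * (1 - 2*x)^k
double redlichKister(const double L[4][3], double x, double T)
{
    double gxs = 0.0, w = 1.0;
    for (int k = 0; k < 4; ++k) {
        gxs += lmix(L[k], T) * w;
        w *= (1.0 - 2.0 * x);
    }
    return x * (1.0 - x) * gxs;
}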
sycl_vtypes.h
#pragma once
#include <cassert>

#include "lattice/constants.h"
#include "dslash/dslash_defaults.h"
#include "lattice/lattice_info.h"
#include "dslash/dslash_complex.h"
#include "dslash/dslash_vectype_sycl.h"
#include "dslash/sycl_view.h"
#include "dslash/dslash_vnode.h"
#include "dslash/sycl_vneighbor_table.h"

namespace MG {

// inline: this definition lives in a header, so without it every translation
// unit including the header would emit its own external definition.
inline IndexArray block(IndexArray input, IndexArray block_factors){
	IndexArray ret_val = input;
	for(int mu=0; mu < 4; ++mu ) {
		assert( ret_val[mu] % block_factors[mu] == 0);
		ret_val[mu]/= block_factors[mu];
	}
	return ret_val;
}

template<typename T, typename VN, int _num_spins>
class SyCLCBFineVSpinor {
public:
	using VecType = SIMDComplexSyCL<typename BaseType<T>::Type, VN::VecLen>;
	using DataType = View<VecType,3,DefaultSpinorLayout>;

	template<cl::sycl::access::mode accessMode,
		cl::sycl::access::target accessTarget = cl::sycl::access::target::global_buffer>
	using DataAccessor = ViewAccessor<VecType,3,DefaultSpinorLayout,accessMode,accessTarget>;

	SyCLCBFineVSpinor(const LatticeInfo& info, IndexType cb)
	: _g_info(info), _cb(cb),
	  _info(block(info.GetLatticeOrigin(),{ VN::Dim0, VN::Dim1, VN::Dim2, VN::Dim3 }),
	        block(info.GetLatticeDimensions(),{ VN::Dim0, VN::Dim1, VN::Dim2, VN::Dim3 }),
	        info.GetNumSpins(), info.GetNumColors(), info.GetNodeInfo()),
	  _cb_data("cb_data", {_info.GetNumCBSites(),_num_spins,3})
	{
		if( _g_info.GetNumColors() != 3 ) {
			MasterLog(ERROR, "CBFineSpinor has to have 3 colors in info. Info has %d", _g_info.GetNumColors());
		}
		if( _g_info.GetNumSpins() != _num_spins ) {
			MasterLog(ERROR, "CBFineSpinor has to have %d spins in info. Info has %d", _num_spins,_g_info.GetNumSpins());
		}
	}

	inline const LatticeInfo& GetInfo() const { return (_info); }
	inline const LatticeInfo& GetGlobalInfo() const { return _g_info; }
	inline IndexType GetCB() const { return _cb; }

	DataType GetData() const { return _cb_data; }
	DataType& GetData() { return _cb_data; }

private:
	const LatticeInfo& _g_info;
	const IndexType _cb;
	LatticeInfo _info;
	DataType _cb_data;
};

template<typename T, typename VN>
using SyCLVSpinorView = typename SyCLCBFineVSpinor<T,VN,4>::DataType;

template<typename T, typename VN, cl::sycl::access::mode accessMode,
	cl::sycl::access::target accessTarget = cl::sycl::access::target::global_buffer>
using SyCLVSpinorViewAccessor = typename SyCLCBFineVSpinor<T,VN,4>::template DataAccessor<accessMode,accessTarget>;

template<typename T, typename VN>
using SyCLVHalfSpinorView = typename SyCLCBFineVSpinor<T,VN,2>::DataType;

template<typename T, typename VN, cl::sycl::access::mode accessMode,
	cl::sycl::access::target accessTarget = cl::sycl::access::target::global_buffer>
using SyCLVHalfSpinorViewAccessor = typename SyCLCBFineVSpinor<T,VN,2>::template DataAccessor<accessMode,accessTarget>;

template<typename T, typename VN>
class SyCLCBFineVGaugeField {
public:
	using VecType = SIMDComplexSyCL<typename BaseType<T>::Type, VN::VecLen>;
	using DataType = View<VecType,4,DefaultGaugeLayout>;

	template<cl::sycl::access::mode accessMode,
		cl::sycl::access::target accessTarget = cl::sycl::access::target::global_buffer>
	using DataAccessor = ViewAccessor<VecType,4,DefaultGaugeLayout,accessMode,accessTarget>;

	SyCLCBFineVGaugeField(const LatticeInfo& info, IndexType cb)
	: _g_info(info), _cb(cb),
	  _info(block(info.GetLatticeOrigin(),{ VN::Dim0, VN::Dim1, VN::Dim2, VN::Dim3 }),
	        block(info.GetLatticeDimensions(),{ VN::Dim0, VN::Dim1, VN::Dim2, VN::Dim3 }),
	        info.GetNumSpins(), info.GetNumColors(), info.GetNodeInfo()),
	  _cb_data("cb_data", {_info.GetNumCBSites(), 4,3,3})
	{
		if( _g_info.GetNumColors() != 3 )
		{
			MasterLog(ERROR, "SyCLCBFineVGaugeField has to have 3 colors in info. Info has %d", _g_info.GetNumColors());
		}
	}

	inline const LatticeInfo& GetInfo() const { return (_info); }
	inline const LatticeInfo& GetGlobalInfo() const { return _g_info; }
	inline IndexType GetCB() const { return _cb; }

	const DataType& GetData() const { return (*this)._cb_data; }
	DataType& GetData() { return (*this)._cb_data; }

private:
	const LatticeInfo& _g_info;
	LatticeInfo _info;
	DataType _cb_data;
	const IndexType _cb;
};

template<typename T, typename VN>
class SyCLFineVGaugeField {
private:
	const LatticeInfo& _info;
	SyCLCBFineVGaugeField<T,VN> _gauge_data_even;
	SyCLCBFineVGaugeField<T,VN> _gauge_data_odd;
public:
	SyCLFineVGaugeField(const LatticeInfo& info)
	: _info(info), _gauge_data_even(info,EVEN), _gauge_data_odd(info,ODD)
	{ }

	const SyCLCBFineVGaugeField<T,VN>& operator()(IndexType cb) const {
		return (cb == EVEN) ? _gauge_data_even : _gauge_data_odd;
		//return *(_gauge_data[cb]);
	}

	SyCLCBFineVGaugeField<T,VN>& operator()(IndexType cb) {
		return (cb == EVEN) ? _gauge_data_even : _gauge_data_odd;
		//return *(_gauge_data[cb]);
	}
};

// Double copied gauge field.
template<typename T, typename VN>
class SyCLCBFineVGaugeFieldDoubleCopy {
public:
	SyCLCBFineVGaugeFieldDoubleCopy(const LatticeInfo& info, IndexType cb)
	: _g_info(info), _cb(cb),
	  _info(block(info.GetLatticeOrigin(),{ VN::Dim0, VN::Dim1, VN::Dim2, VN::Dim3 }),
	        block(info.GetLatticeDimensions(),{ VN::Dim0, VN::Dim1, VN::Dim2, VN::Dim3 }),
	        info.GetNumSpins(), info.GetNumColors(), info.GetNodeInfo()),
	  _cb_data("cb_data", {_info.GetNumCBSites(),8,3,3})
	{
		if( _g_info.GetNumColors() != 3 ) {
			MasterLog(ERROR, "SyCLCBFineVGaugeFieldDoubleCopy has to have 3 colors in info. Info has %d", _g_info.GetNumColors());
		}
		MasterLog(INFO, "Exiting Constructor");
	}

	inline const LatticeInfo& GetInfo() const { return (_info); }
	inline const LatticeInfo& GetGlobalInfo() const { return _g_info; }
	inline IndexType GetCB() const { return _cb; }

	// Double Copied Gauge Field.
	using VecType = SIMDComplexSyCL<typename BaseType<T>::Type, VN::VecLen>;
	using DataType = View<VecType,4,DefaultGaugeLayout>;

	template<cl::sycl::access::mode accessMode,
		cl::sycl::access::target accessTarget = cl::sycl::access::target::global_buffer>
	using DataAccessor = ViewAccessor<VecType,4,DefaultGaugeLayout,accessMode,accessTarget>;

	const DataType& GetData() const { return _cb_data; }
	DataType& GetData() { return _cb_data; }

	void import(const SyCLCBFineVGaugeField<T,VN>& src_cb,
	            const SyCLCBFineVGaugeField<T,VN>& src_othercb)
	{
		using InputType = typename SyCLCBFineVGaugeField<T,VN>::DataType;
		using FType = typename BaseType<T>::Type;

		// Sanity: src_cb has to match my CB
		if( GetCB() != src_cb.GetCB() ) {
			MasterLog(ERROR, "cb of src_cb does not match my cb in import()");
		}

		// Sanity 2: othercb has to be the opposite CB from me.
		int expected_othercb = (src_cb.GetCB() == EVEN) ?
ODD :EVEN; if( expected_othercb != src_othercb.GetCB() ) { MasterLog(ERROR, "cb of src_othercb is not opposite of mine in import()"); } // Grab a site table IndexArray cb_latdims = _info.GetCBLatticeDimensions(); SiteTable neigh_table_tab(cb_latdims[0],cb_latdims[1],cb_latdims[2], cb_latdims[3]); SiteTableAccess neigh_table=neigh_table_tab.template get_access<cl::sycl::access::mode::read>(); size_t num_cbsites = _info.GetNumCBSites(); InputType cb_data_in = src_cb.GetData().template get_access<cl::sycl::access::mode::read>(); InputType othercb_data_in = src_othercb.GetData().template get_access<cl::sycl::access::mode::read>(); int target_cb = _cb; #pragma omp parallel for for(size_t site = 0; site < num_cbsites; ++site) { IndexArray site_coords = LayoutLeft::coords(site,cb_latdims); std::size_t xcb = site_coords[0]; std::size_t y = site_coords[1]; std::size_t z = site_coords[2]; std::size_t t = site_coords[3]; size_t n_idx; bool do_permute; // T_minus neigh_table.NeighborTMinus(xcb,y,z,t,n_idx,do_permute); if( do_permute ) { for(int col=0; col < 3; ++col) { for(int col2=0; col2 < 3; ++col2) { ComplexCopy<FType,VN::VecLen>(_cb_data(site,0,col,col2),VN::permuteT(othercb_data_in(n_idx,3,col,col2))); } } } else { for(int col=0; col < 3; ++col) { for(int col2=0; col2 < 3; ++col2) { ComplexCopy<FType,VN::VecLen>(_cb_data(site,0,col,col2), othercb_data_in(n_idx,3,col,col2)); } } } // Z_minus neigh_table.NeighborZMinus(xcb,y,z,t,n_idx,do_permute); if( do_permute ) { for(int col=0; col < 3; ++col) { for(int col2=0; col2 < 3; ++col2) { ComplexCopy<FType,VN::VecLen>(_cb_data(site,1,col,col2),VN::permuteZ(othercb_data_in(n_idx,2,col,col2))); } } } else { for(int col=0; col < 3; ++col) { for(int col2=0; col2 < 3; ++col2) { ComplexCopy<FType,VN::VecLen>(_cb_data(site,1,col,col2), othercb_data_in(n_idx,2,col,col2)); } } } // Y_minus neigh_table.NeighborYMinus(xcb,y,z,t,n_idx,do_permute); if( do_permute ) { for(int col=0; col < 3; ++col) { for(int col2=0; col2 < 3; ++col2) { ComplexCopy<FType,VN::VecLen>(_cb_data(site,2,col,col2),VN::permuteY(othercb_data_in(n_idx,1,col,col2))); } } } else { for(int col=0; col < 3; ++col) { for(int col2=0; col2 < 3; ++col2) { ComplexCopy<FType,VN::VecLen>(_cb_data(site,2,col,col2),othercb_data_in(n_idx,1,col,col2)); } } } // X_minus neigh_table.NeighborXMinus(xcb,y,z,t,target_cb,n_idx,do_permute); if( do_permute ) { for(int col=0; col < 3; ++col) { for(int col2=0; col2 < 3; ++col2) { ComplexCopy<FType,VN::VecLen>(_cb_data(site,3,col,col2),VN::permuteX(othercb_data_in(n_idx,0,col,col2))); } } } else { for(int col=0; col < 3; ++col) { for(int col2=0; col2 < 3; ++col2) { ComplexCopy<FType,VN::VecLen>(_cb_data(site,3,col,col2),othercb_data_in(n_idx,0,col,col2)); } } } // X-plus for(int col=0; col < 3; ++col) { for(int col2=0; col2 < 3; ++col2) { ComplexCopy<FType,VN::VecLen>(_cb_data(site,4,col,col2),cb_data_in(site,0,col,col2)); } } // Y_Plus for(int col=0; col < 3; ++col) { for(int col2=0; col2 < 3; ++col2) { ComplexCopy<FType,VN::VecLen>(_cb_data(site,5,col,col2),cb_data_in(site,1,col,col2)); } } // Z_Plus for(int col=0; col < 3; ++col) { for(int col2=0; col2 < 3; ++col2) { ComplexCopy<FType,VN::VecLen>(_cb_data(site,6,col,col2),cb_data_in(site,2,col,col2)); } } // T_Plus for(int col=0; col < 3; ++col) { for(int col2=0; col2 < 3; ++col2) { ComplexCopy<FType,VN::VecLen>(_cb_data(site,7,col,col2),cb_data_in(site,3,col,col2)); } } } } private: const LatticeInfo& _g_info; LatticeInfo _info; DataType _cb_data; const IndexType _cb; }; template<typename T, typename VN> void 
import(SyCLCBFineVGaugeFieldDoubleCopy<T,VN>& target,
       SyCLCBFineVGaugeField<T,VN> src_cb,
       SyCLCBFineVGaugeField<T,VN> src_othercb)
{
	using InputType = typename SyCLCBFineVGaugeField<T,VN>::DataType;
	using FType=typename BaseType<T>::Type;

	// Sanity: src_cb has to match my CB
	if( target.GetCB() != src_cb.GetCB() ) {
		MasterLog(ERROR, "cb of src_cb does not match my cb in import()");
	}

	// Sanity 2: othercb has to be the opposite CB from me.
	int expected_othercb = (src_cb.GetCB() == EVEN) ? ODD : EVEN;
	if( expected_othercb != src_othercb.GetCB() ) {
		MasterLog(ERROR, "cb of src_othercb is not opposite of mine in import()");
	}

	// Grab a site table
	const LatticeInfo& info = target.GetInfo();
	IndexArray cb_latdims = info.GetCBLatticeDimensions();
	MasterLog(INFO, "Double Storing Gauge: Info has size=(%d,%d,%d,%d)",
	          cb_latdims[0],cb_latdims[1],cb_latdims[2],cb_latdims[3]);
	SiteTable neigh_table_tab(cb_latdims[0],cb_latdims[1],cb_latdims[2], cb_latdims[3]);
	auto neigh_table = neigh_table_tab.template get_access<cl::sycl::access::mode::read>();
	auto cb_data_out = target.GetData().template get_access<cl::sycl::access::mode::write>();
	auto cb_data_in = src_cb.GetData().template get_access<cl::sycl::access::mode::read>();
	auto othercb_data_in= src_othercb.GetData().template get_access<cl::sycl::access::mode::read>();

	int target_cb = target.GetCB(); // Lambda cannot access member _cb on host
	const size_t num_cbsites = info.GetNumCBSites();

#pragma omp parallel for
	for(size_t site=0; site < num_cbsites; ++site) {
		IndexArray coord_array = LayoutLeft::coords(site,cb_latdims);
		const size_t xcb = coord_array[0];
		const size_t y=coord_array[1];
		const size_t z=coord_array[2];
		const size_t t=coord_array[3];

		size_t n_idx;
		bool do_permute;

		// T_minus
		neigh_table.NeighborTMinus(xcb,y,z,t,n_idx,do_permute);
		if( do_permute ) {
			for(int col=0; col < 3; ++col) {
				for(int col2=0; col2 < 3; ++col2) {
					ComplexCopy<FType,VN::VecLen>(cb_data_out(site,0,col,col2),VN::permuteT(othercb_data_in(n_idx,3,col,col2)));
				}
			}
		}
		else {
			for(int col=0; col < 3; ++col) {
				for(int col2=0; col2 < 3; ++col2) {
					ComplexCopy<FType,VN::VecLen>(cb_data_out(site,0,col,col2), othercb_data_in(n_idx,3,col,col2));
					//cb_data_out(site,0,col,col2) = VN::permute(mask, othercb_data_in(n_idx,3,col,col2));
				}
			}
		}

		// Z_minus
		neigh_table.NeighborZMinus(xcb,y,z,t,n_idx,do_permute);
		if( do_permute ) {
			for(int col=0; col < 3; ++col) {
				for(int col2=0; col2 < 3; ++col2) {
					ComplexCopy<FType,VN::VecLen>(cb_data_out(site,1,col,col2),VN::permuteZ(othercb_data_in(n_idx,2,col,col2)));
					//cb_data_out(site,1,col,col2) = VN::permute(mask, othercb_data_in(n_idx,2,col,col2));
				}
			}
		}
		else {
			for(int col=0; col < 3; ++col) {
				for(int col2=0; col2 < 3; ++col2) {
					ComplexCopy<FType,VN::VecLen>(cb_data_out(site,1,col,col2),othercb_data_in(n_idx,2,col,col2));
					//cb_data_out(site,1,col,col2) = VN::permute(mask, othercb_data_in(n_idx,2,col,col2));
				}
			}
		}

		// Y_minus
		neigh_table.NeighborYMinus(xcb,y,z,t,n_idx,do_permute);
		if( do_permute ) {
			for(int col=0; col < 3; ++col) {
				for(int col2=0; col2 < 3; ++col2) {
					ComplexCopy<FType,VN::VecLen>(cb_data_out(site,2,col,col2),VN::permuteY(othercb_data_in(n_idx,1,col,col2)));
					//cb_data_out(site,2,col,col2) = VN::permute(mask, othercb_data_in(n_idx,1,col,col2));
				}
			}
		}
		else {
			for(int col=0; col < 3; ++col) {
				for(int col2=0; col2 < 3; ++col2) {
					ComplexCopy<FType,VN::VecLen>(cb_data_out(site,2,col,col2),othercb_data_in(n_idx,1,col,col2));
					//cb_data_out(site,2,col,col2) = VN::permute(mask, othercb_data_in(n_idx,1,col,col2));
				}
			}
		}

		// X_minus
neigh_table.NeighborXMinus(xcb,y,z,t,target_cb,n_idx,do_permute); if( do_permute ) { for(int col=0; col < 3; ++col) { for(int col2=0; col2 < 3; ++col2) { ComplexCopy<FType,VN::VecLen>(cb_data_out(site,3,col,col2),VN::permuteX(othercb_data_in(n_idx,0,col,col2))); //cb_data_out(site,3,col,col2) = VN::permute(mask, othercb_data_in(n_idx,0,col,col2)); } } } else { for(int col=0; col < 3; ++col) { for(int col2=0; col2 < 3; ++col2) { ComplexCopy<FType,VN::VecLen>(cb_data_out(site,3,col,col2),othercb_data_in(n_idx,0,col,col2)); //cb_data_out(site,3,col,col2) = VN::permute(mask, othercb_data_in(n_idx,0,col,col2)); } } } // X-Plus for(int col=0; col < 3; ++col) { for(int col2=0; col2 < 3; ++col2) { ComplexCopy<FType,VN::VecLen>(cb_data_out(site,4,col,col2),cb_data_in(site,0,col,col2)); //cb_data_out(site,4,col,col2)=cb_data_in(site,0,col,col2); } } // Y_Plus for(int col=0; col < 3; ++col) { for(int col2=0; col2 < 3; ++col2) { ComplexCopy<FType,VN::VecLen>(cb_data_out(site,5,col,col2),cb_data_in(site,1,col,col2)); //cb_data_out(site,5,col,col2)=cb_data_in(site,1,col,col2); } } // Z_Plus for(int col=0; col < 3; ++col) { for(int col2=0; col2 < 3; ++col2) { ComplexCopy<FType,VN::VecLen>(cb_data_out(site,6,col,col2),cb_data_in(site,2,col,col2)); //cb_data_out(site,6,col,col2)=cb_data_in(site,2,col,col2); } } // T_Plus for(int col=0; col < 3; ++col) { for(int col2=0; col2 < 3; ++col2) { ComplexCopy<FType,VN::VecLen>(cb_data_out(site,7,col,col2),cb_data_in(site,3,col,col2)); //cb_data_out(site,7,col,col2)=cb_data_in(site,3,col,col2); } } }// parallel for } template<typename T,typename VN> using SyCLVGaugeView = typename SyCLCBFineVGaugeFieldDoubleCopy<T,VN>::DataType; template<typename T, typename VN, cl::sycl::access::mode accessMode, cl::sycl::access::target accessTarget = cl::sycl::access::target::global_buffer> using SyCLVGaugeViewAccessor = typename SyCLCBFineVGaugeFieldDoubleCopy<T,VN>::template DataAccessor<accessMode,accessTarget>; // Site views, these are for use inside kernels and should registerize data template<typename T,const int S, const int C> struct SiteView { T _data[S][C]; T& operator()(int color, int spin) { return _data[spin][color]; } const T& operator()(int color, int spin) const { return _data[spin][color]; } }; template<typename T> using SpinorSiteView = SiteView<T ,4,3>; template<typename T> using HalfSpinorSiteView = SiteView< T,2,3>; template<typename T> using GaugeSiteView = SiteView< T ,3,3>; }
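Both import() routines above decompose the flat site index with LayoutLeft::coords() before looking up neighbors. A stand-alone sketch of that decomposition, assuming the leftmost dimension runs fastest (which the name LayoutLeft suggests; the actual MG implementation may differ in detail):

#include <array>
#include <cstddef>

// Split a flat site index into 4D coordinates, first dimension fastest.
inline std::array<std::size_t, 4>
layoutLeftCoords(std::size_t site, const std::array<std::size_t, 4> &dims)
{
	std::array<std::size_t, 4> c;
	for (int mu = 0; mu < 4; ++mu) {
		c[mu] = site % dims[mu];
		site /= dims[mu];
	}
	return c;
}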
GB_unaryop__abs_uint32_uint8.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__abs_uint32_uint8 // op(A') function: GB_tran__abs_uint32_uint8 // C type: uint32_t // A type: uint8_t // cast: uint32_t cij = (uint32_t) aij // unaryop: cij = aij #define GB_ATYPE \ uint8_t #define GB_CTYPE \ uint32_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint8_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CASTING(z, x) \ uint32_t z = (uint32_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ABS || GxB_NO_UINT32 || GxB_NO_UINT8) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__abs_uint32_uint8 ( uint32_t *restrict Cx, const uint8_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__abs_uint32_uint8 ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
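Because uint8_t is unsigned, the ABS operator reduces to the identity, which is why GB_OP above is a plain assignment. For reference, hand-expanding the macros in this file shows what each iteration of GB_unop__abs_uint32_uint8 does:

#include <stdint.h>

// The apply loop above with GB_CAST_OP expanded by hand.
static void unop_abs_uint32_uint8_expanded(uint32_t *Cx, const uint8_t *Ax,
                                           int64_t anz)
{
    for (int64_t p = 0 ; p < anz ; p++)
    {
        uint8_t aij = Ax [p] ;          // GB_GETA  (aij, Ax, p)
        uint32_t x = (uint32_t) aij ;   // GB_CASTING (x, aij)
        Cx [p] = x ;                    // GB_OP (GB_CX (p), x)
    }
}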
add.c
/******************************************************************************* * Copyright 2021 UChicago Argonne, LLC. * (c.f. AUTHORS, LICENSE) * * This file is part of the NRM Benchmarks project. * For more info, see https://github.com/anlsys/nrm-benchmarks * * SPDX-License-Identifier: BSD-3-Clause ******************************************************************************/ #include "config.h" #include "nrm-benchmarks.h" #include <nrm.h> static double *a, *b, *c; static struct nrm_context *context; static struct nrm_scope *region_scope, **thread_scope; int main(int argc, char **argv) { /* configuration parameters: * - array size in number of double elements * - number of times to run through the benchmark */ size_t array_size; long int times; /* needed for performance measurement */ int64_t sumtime = 0, mintime = INT64_MAX, maxtime = 0; nrmb_time_t start, end; size_t memory_size; int num_threads; /* retrieve the size of the allocation and the number of time * to loop through the kernel. */ assert(argc == 3); array_size = strtoull(argv[1], NULL, 0); assert(!errno); times = strtol(argv[2], NULL, 0); assert(!errno); /* ensure that OpenMP is giving us the right number of threads */ #pragma omp parallel #pragma omp master num_threads = omp_get_num_threads(); int err = 0; #pragma omp parallel #pragma omp atomic err++; assert(num_threads == err); err = 0; /* allocate the arrays and initialize them. Note that we expect the * first-touch policy of Linux to result in the arrays being properly * balanced between threads/numa-nodes */ memory_size = array_size * sizeof(double); a = malloc(memory_size); b = malloc(memory_size); c = malloc(memory_size); #pragma omp parallel for for(size_t i = 0; i < array_size; i++) { a[i] = 1.0; b[i] = 2.0; c[i] = 0.0; } /* NRM Context init */ context = nrm_ctxt_create(); nrm_init(context, argv[0], 0, 0); /* one run of the benchmark for free, warms up the memory */ #pragma omp parallel for for(size_t i = 0; i < array_size; i++) c[i] = a[i] + b[i]; /* this version of the benchmarks reports one progress each time it goes * through the entire array. 
 */

	/* Create scopes */
	region_scope = nrm_scope_create();
	thread_scope = malloc(num_threads*sizeof(nrm_scope_t*));
	for (int i = 0; i < num_threads; i++) {
		thread_scope[i] = nrm_scope_create();
	}

	/* Get master process scope */
	nrm_scope_threadshared(region_scope);
	nrm_send_progress(context, 1, region_scope);

	for(long int iter = 0; iter < times; iter++)
	{
		int64_t time;
		nrmb_gettime(&start);

		/* the actual benchmark */
		#pragma omp parallel for
		for(size_t i = 0; i < array_size; i++) {
			c[i] = a[i] + b[i];

			/* Get scopes */
			nrm_scope_threadshared(region_scope);
			nrm_scope_threadprivate(thread_scope[omp_get_thread_num()]);
			nrm_send_progress(context, 1, thread_scope[omp_get_thread_num()]);
		}

		nrmb_gettime(&end);
		nrm_send_progress(context, 1, region_scope);
		time = nrmb_timediff(&start, &end);
		sumtime += time;
		mintime = NRMB_MIN(time, mintime);
		maxtime = NRMB_MAX(time, maxtime);
	}

	nrm_fini(context);
	nrm_ctxt_delete(context);

	/* Delete scopes */
	nrm_scope_delete(region_scope);
	for (int i = 0; i < num_threads; i++) {
		nrm_scope_delete(thread_scope[i]);
	}

	/* compute stats */
	/* report the configuration and timings */
	fprintf(stdout, "NRM Benchmarks: %s\n", argv[0]);
	fprintf(stdout, "Version: %s\n", PACKAGE_VERSION);
	fprintf(stdout, "Description: one progress per iteration, Add benchmark\n");
	fprintf(stdout, "Array size: %zu (elements).\n", array_size);
	fprintf(stdout, "Memory per array: %.1f MiB.\n",
		(double) memory_size /1024.0/1024.0);
	fprintf(stdout, "Kernel was executed: %ld times.\n", times);
	fprintf(stdout, "Number of threads: %d\n", num_threads);
	fprintf(stdout, "Time (s): avg: %11.6f min: %11.6f max: %11.6f\n",
		1.0E-09 * sumtime/times, 1.0E-09 * mintime, 1.0E-09 * maxtime);
	fprintf(stdout, "Perf (MB/s): avg: %12.6f best: %12.6f\n",
		(3.0E-06 * memory_size)/ (1.0E-09 * sumtime/times),
		(3.0E-06 * memory_size)/ (1.0E-09 * mintime));

	/* validate the benchmark: minimum amount of bits should be different. */
	err = 0;
	for(size_t i = 0; i < array_size && err == 0; i++)
		err = err || !nrmb_check_double(3.0, c[i], 2);
	if(err)
		fprintf(stdout, "VALIDATION FAILED!!!!\n");
	else
		fprintf(stdout, "VALIDATION PASSED!!!!\n");
	return err;
}
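The performance line charges three array traversals per kernel pass (a and b read, c written), which is where the factor 3.0 comes from; with the 1.0E-06 scaling the figure is decimal MB/s. A small helper making the accounting explicit (the write-allocate caveat is the standard STREAM one):

#include <stddef.h>
#include <stdint.h>

/* Rate as reported above: 3 * memory_size bytes are charged per pass.
 * Note: with write-allocate caches the hardware may actually move a
 * fourth array's worth of traffic for c. */
static double add_rate_mb_per_s(size_t memory_size_bytes, int64_t time_ns)
{
	return (3.0e-6 * (double) memory_size_bytes) / (1.0e-9 * (double) time_ns);
}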
_eval.c
/* Generated by Cython 0.23.1 */ #define PY_SSIZE_T_CLEAN #include "Python.h" #ifndef Py_PYTHON_H #error Python headers needed to compile C extensions, please install development version of Python. #elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03020000) #error Cython requires Python 2.6+ or Python 3.2+. #else #define CYTHON_ABI "0_23_1" #include <stddef.h> #ifndef offsetof #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) #endif #if !defined(WIN32) && !defined(MS_WINDOWS) #ifndef __stdcall #define __stdcall #endif #ifndef __cdecl #define __cdecl #endif #ifndef __fastcall #define __fastcall #endif #endif #ifndef DL_IMPORT #define DL_IMPORT(t) t #endif #ifndef DL_EXPORT #define DL_EXPORT(t) t #endif #ifndef PY_LONG_LONG #define PY_LONG_LONG LONG_LONG #endif #ifndef Py_HUGE_VAL #define Py_HUGE_VAL HUGE_VAL #endif #ifdef PYPY_VERSION #define CYTHON_COMPILING_IN_PYPY 1 #define CYTHON_COMPILING_IN_CPYTHON 0 #else #define CYTHON_COMPILING_IN_PYPY 0 #define CYTHON_COMPILING_IN_CPYTHON 1 #endif #if !defined(CYTHON_USE_PYLONG_INTERNALS) && CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x02070000 #define CYTHON_USE_PYLONG_INTERNALS 1 #endif #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag) #define Py_OptimizeFlag 0 #endif #define __PYX_BUILD_PY_SSIZE_T "n" #define CYTHON_FORMAT_SSIZE_T "z" #if PY_MAJOR_VERSION < 3 #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #define __Pyx_DefaultClassType PyClass_Type #else #define __Pyx_BUILTIN_MODULE_NAME "builtins" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #define __Pyx_DefaultClassType PyType_Type #endif #ifndef Py_TPFLAGS_CHECKTYPES #define Py_TPFLAGS_CHECKTYPES 0 #endif #ifndef Py_TPFLAGS_HAVE_INDEX #define Py_TPFLAGS_HAVE_INDEX 0 #endif #ifndef Py_TPFLAGS_HAVE_NEWBUFFER #define Py_TPFLAGS_HAVE_NEWBUFFER 0 #endif #ifndef Py_TPFLAGS_HAVE_FINALIZE #define Py_TPFLAGS_HAVE_FINALIZE 0 #endif #if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND) #define CYTHON_PEP393_ENABLED 1 #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\ 0 : _PyUnicode_Ready((PyObject *)(op))) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i) #define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u) #define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u) #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i) #else #define CYTHON_PEP393_ENABLED 0 #define __Pyx_PyUnicode_READY(op) (0) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) #define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE)) #define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u)) #define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i])) #endif #if CYTHON_COMPILING_IN_PYPY #define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b) #else #define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\ PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b)) #endif #if CYTHON_COMPILING_IN_PYPY && 
!defined(PyUnicode_Contains) #define PyUnicode_Contains(u, s) PySequence_Contains(u, s) #endif #define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b)) #define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? PyNumber_Remainder(a, b) : PyUnicode_Format(a, b)) #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b) #else #define __Pyx_PyString_Format(a, b) PyString_Format(a, b) #endif #if PY_MAJOR_VERSION >= 3 #define PyBaseString_Type PyUnicode_Type #define PyStringObject PyUnicodeObject #define PyString_Type PyUnicode_Type #define PyString_Check PyUnicode_Check #define PyString_CheckExact PyUnicode_CheckExact #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj) #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj) #else #define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj)) #define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj)) #endif #ifndef PySet_CheckExact #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) #endif #define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) #if PY_MAJOR_VERSION >= 3 #define PyIntObject PyLongObject #define PyInt_Type PyLong_Type #define PyInt_Check(op) PyLong_Check(op) #define PyInt_CheckExact(op) PyLong_CheckExact(op) #define PyInt_FromString PyLong_FromString #define PyInt_FromUnicode PyLong_FromUnicode #define PyInt_FromLong PyLong_FromLong #define PyInt_FromSize_t PyLong_FromSize_t #define PyInt_FromSsize_t PyLong_FromSsize_t #define PyInt_AsLong PyLong_AsLong #define PyInt_AS_LONG PyLong_AS_LONG #define PyInt_AsSsize_t PyLong_AsSsize_t #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask #define PyNumber_Int PyNumber_Long #endif #if PY_MAJOR_VERSION >= 3 #define PyBoolObject PyLongObject #endif #if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY #ifndef PyUnicode_InternFromString #define PyUnicode_InternFromString(s) PyUnicode_FromString(s) #endif #endif #if PY_VERSION_HEX < 0x030200A4 typedef long Py_hash_t; #define __Pyx_PyInt_FromHash_t PyInt_FromLong #define __Pyx_PyInt_AsHash_t PyInt_AsLong #else #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyMethod_New(func, self, klass) ((self) ? 
PyMethod_New(func, self) : PyInstanceMethod_New(func)) #else #define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass) #endif #if PY_VERSION_HEX >= 0x030500B1 #define __Pyx_PyAsyncMethodsStruct PyAsyncMethods #define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async) #elif CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 typedef struct { unaryfunc am_await; unaryfunc am_aiter; unaryfunc am_anext; } __Pyx_PyAsyncMethodsStruct; #define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved)) #else #define __Pyx_PyType_AsAsync(obj) NULL #endif #ifndef CYTHON_RESTRICT #if defined(__GNUC__) #define CYTHON_RESTRICT __restrict__ #elif defined(_MSC_VER) && _MSC_VER >= 1400 #define CYTHON_RESTRICT __restrict #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_RESTRICT restrict #else #define CYTHON_RESTRICT #endif #endif #define __Pyx_void_to_None(void_result) (void_result, Py_INCREF(Py_None), Py_None) #ifndef CYTHON_INLINE #if defined(__GNUC__) #define CYTHON_INLINE __inline__ #elif defined(_MSC_VER) #define CYTHON_INLINE __inline #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_INLINE inline #else #define CYTHON_INLINE #endif #endif #if defined(WIN32) || defined(MS_WINDOWS) #define _USE_MATH_DEFINES #endif #include <math.h> #ifdef NAN #define __PYX_NAN() ((float) NAN) #else static CYTHON_INLINE float __PYX_NAN() { float value; memset(&value, 0xFF, sizeof(value)); return value; } #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) #else #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) #endif #ifndef __PYX_EXTERN_C #ifdef __cplusplus #define __PYX_EXTERN_C extern "C" #else #define __PYX_EXTERN_C extern #endif #endif #define __PYX_HAVE__tribeflow___eval #define __PYX_HAVE_API__tribeflow___eval #include "string.h" #include "stdio.h" #include "pythread.h" #include "math.h" #include "stdlib.h" #include "pystate.h" #ifdef _OPENMP #include <omp.h> #endif /* _OPENMP */ #ifdef PYREX_WITHOUT_ASSERTIONS #define CYTHON_WITHOUT_ASSERTIONS #endif #ifndef CYTHON_UNUSED # if defined(__GNUC__) # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif # elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif #endif #ifndef CYTHON_NCP_UNUSED # if CYTHON_COMPILING_IN_CPYTHON # define CYTHON_NCP_UNUSED # else # define CYTHON_NCP_UNUSED CYTHON_UNUSED # endif #endif typedef struct {PyObject **p; char *s; const Py_ssize_t n; const char* encoding; const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; #define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0 #define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT 0 #define __PYX_DEFAULT_STRING_ENCODING "" #define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString #define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize #define __Pyx_uchar_cast(c) ((unsigned char)c) #define __Pyx_long_cast(x) ((long)x) #define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\ (sizeof(type) < sizeof(Py_ssize_t)) ||\ (sizeof(type) > sizeof(Py_ssize_t) &&\ likely(v < (type)PY_SSIZE_T_MAX ||\ v == (type)PY_SSIZE_T_MAX) &&\ (!is_signed || 
likely(v > (type)PY_SSIZE_T_MIN ||\ v == (type)PY_SSIZE_T_MIN))) ||\ (sizeof(type) == sizeof(Py_ssize_t) &&\ (is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\ v == (type)PY_SSIZE_T_MAX))) ) #if defined (__cplusplus) && __cplusplus >= 201103L #include <cstdlib> #define __Pyx_sst_abs(value) std::abs(value) #elif SIZEOF_INT >= SIZEOF_SIZE_T #define __Pyx_sst_abs(value) abs(value) #elif SIZEOF_LONG >= SIZEOF_SIZE_T #define __Pyx_sst_abs(value) labs(value) #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define __Pyx_sst_abs(value) llabs(value) #elif defined (_MSC_VER) && defined (_M_X64) #define __Pyx_sst_abs(value) _abs64(value) #elif defined (__GNUC__) #define __Pyx_sst_abs(value) __builtin_llabs(value) #else #define __Pyx_sst_abs(value) ((value<0) ? -value : value) #endif static CYTHON_INLINE char* __Pyx_PyObject_AsString(PyObject*); static CYTHON_INLINE char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length); #define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s)) #define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l) #define __Pyx_PyBytes_FromString PyBytes_FromString #define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*); #if PY_MAJOR_VERSION < 3 #define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString #define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize #else #define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString #define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize #endif #define __Pyx_PyObject_AsSString(s) ((signed char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s) #define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s) #define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s) #define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s) #define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s) #if PY_MAJOR_VERSION < 3 static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) { const Py_UNICODE *u_end = u; while (*u_end++) ; return (size_t)(u_end - u - 1); } #else #define __Pyx_Py_UNICODE_strlen Py_UNICODE_strlen #endif #define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u)) #define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode #define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode #define __Pyx_NewRef(obj) (Py_INCREF(obj), obj) #define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None) #define __Pyx_PyBool_FromLong(b) ((b) ? __Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False)) static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x); static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); #if CYTHON_COMPILING_IN_CPYTHON #define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? 
PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x))
#else
#define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x)
#endif
#define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x))
#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
static int __Pyx_sys_getdefaultencoding_not_ascii;
static int __Pyx_init_sys_getdefaultencoding_params(void) {
    PyObject* sys;
    PyObject* default_encoding = NULL;
    PyObject* ascii_chars_u = NULL;
    PyObject* ascii_chars_b = NULL;
    const char* default_encoding_c;
    sys = PyImport_ImportModule("sys");
    if (!sys) goto bad;
    default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL);
    Py_DECREF(sys);
    if (!default_encoding) goto bad;
    default_encoding_c = PyBytes_AsString(default_encoding);
    if (!default_encoding_c) goto bad;
    if (strcmp(default_encoding_c, "ascii") == 0) {
        __Pyx_sys_getdefaultencoding_not_ascii = 0;
    } else {
        char ascii_chars[128];
        int c;
        for (c = 0; c < 128; c++) {
            ascii_chars[c] = c;
        }
        __Pyx_sys_getdefaultencoding_not_ascii = 1;
        ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL);
        if (!ascii_chars_u) goto bad;
        ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL);
        if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) ||
                memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) {
            PyErr_Format(
                PyExc_ValueError,
                "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.",
                default_encoding_c);
            goto bad;
        }
        Py_DECREF(ascii_chars_u);
        Py_DECREF(ascii_chars_b);
    }
    Py_DECREF(default_encoding);
    return 0;
bad:
    Py_XDECREF(default_encoding);
    Py_XDECREF(ascii_chars_u);
    Py_XDECREF(ascii_chars_b);
    return -1;
}
#endif
#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3
#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL)
#else
#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL)
#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
static char* __PYX_DEFAULT_STRING_ENCODING;
static int __Pyx_init_sys_getdefaultencoding_params(void) {
    PyObject* sys;
    PyObject* default_encoding = NULL;
    char* default_encoding_c;
    sys = PyImport_ImportModule("sys");
    if (!sys) goto bad;
    default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL);
    Py_DECREF(sys);
    if (!default_encoding) goto bad;
    default_encoding_c = PyBytes_AsString(default_encoding);
    if (!default_encoding_c) goto bad;
    /* +1 for the terminating NUL that strcpy() writes; the original
       malloc(strlen(default_encoding_c)) was one byte short (heap overflow) */
    __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1);
    if (!__PYX_DEFAULT_STRING_ENCODING) goto bad;
    strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c);
    Py_DECREF(default_encoding);
    return 0;
bad:
    Py_XDECREF(default_encoding);
    return -1;
}
#endif
#endif

/* Test for GCC > 2.95 */
#if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#else /* !__GNUC__ or GCC < 2.95 */
#define likely(x) (x)
#define unlikely(x) (x)
#endif /* __GNUC__ */

static PyObject *__pyx_m;
static PyObject *__pyx_d;
static PyObject *__pyx_b;
static PyObject *__pyx_empty_tuple;
static PyObject *__pyx_empty_bytes;
static int __pyx_lineno;
static int __pyx_clineno = 0;
static const char *__pyx_cfilenm = __FILE__;
static const char *__pyx_filename;
static const char *__pyx_f[] = {
    "tribeflow/_eval.pyx",
    "stringsource",
    "type.pxd",
    "bool.pxd",
    "complex.pxd",
    "tribeflow/mycollections/stamp_lists.pxd",
    "tribeflow/kernels/base.pxd",
};
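/* Illustrative sketch, not emitted by Cython: the likely()/unlikely() macros
   above route branch-prediction hints to __builtin_expect on GCC, and the
   generated code wraps nearly every error check in unlikely() so the hot path
   stays on the fall-through branch. A minimal, hypothetical example of the
   idiom (demo helper, not part of the module): */
static CYTHON_UNUSED int __pyx_demo_checked_incref(PyObject *obj) {
    if (unlikely(obj == NULL))
        return -1;      /* error path, predicted cold */
    Py_INCREF(obj);     /* hot path falls through without a jump */
    return 0;
}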
struct __pyx_memoryview_obj;
typedef struct {
  struct __pyx_memoryview_obj *memview;
  char *data;
  Py_ssize_t shape[8];
  Py_ssize_t strides[8];
  Py_ssize_t suboffsets[8];
} __Pyx_memviewslice;
#define IS_UNSIGNED(type) (((type) -1) > 0)
struct __Pyx_StructField_;
#define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0)
typedef struct {
  const char* name;
  struct __Pyx_StructField_* fields;
  size_t size;
  size_t arraysize[8];
  int ndim;
  char typegroup;
  char is_unsigned;
  int flags;
} __Pyx_TypeInfo;
typedef struct __Pyx_StructField_ {
  __Pyx_TypeInfo* type;
  const char* name;
  size_t offset;
} __Pyx_StructField;
typedef struct {
  __Pyx_StructField* field;
  size_t parent_offset;
} __Pyx_BufFmt_StackElem;
typedef struct {
  __Pyx_StructField root;
  __Pyx_BufFmt_StackElem* head;
  size_t fmt_offset;
  size_t new_count, enc_count;
  size_t struct_alignment;
  int is_complex;
  char enc_type;
  char new_packmode;
  char enc_packmode;
  char is_valid_array;
} __Pyx_BufFmt_Context;
#include <pythread.h>
#ifndef CYTHON_ATOMICS
#define CYTHON_ATOMICS 1
#endif
#define __pyx_atomic_int_type int
/* GCC spells its patch-level macro __GNUC_PATCHLEVEL__ (trailing underscores);
   the bare __GNUC_PATCHLEVEL in the original would silently evaluate to 0 */
#if CYTHON_ATOMICS && __GNUC__ >= 4 && (__GNUC_MINOR__ > 1 ||\
    (__GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL__ >= 2)) &&\
    !defined(__i386__)
#define __pyx_atomic_incr_aligned(value, lock) __sync_fetch_and_add(value, 1)
#define __pyx_atomic_decr_aligned(value, lock) __sync_fetch_and_sub(value, 1)
#ifdef __PYX_DEBUG_ATOMICS
#warning "Using GNU atomics"
#endif
#elif CYTHON_ATOMICS && defined(_MSC_VER) && 0
#include <Windows.h>
#undef __pyx_atomic_int_type
#define __pyx_atomic_int_type LONG
#define __pyx_atomic_incr_aligned(value, lock) InterlockedIncrement(value)
#define __pyx_atomic_decr_aligned(value, lock) InterlockedDecrement(value)
#ifdef __PYX_DEBUG_ATOMICS
#pragma message ("Using MSVC atomics")
#endif
#elif CYTHON_ATOMICS && (defined(__ICC) || defined(__INTEL_COMPILER)) && 0
#define __pyx_atomic_incr_aligned(value, lock) _InterlockedIncrement(value)
#define __pyx_atomic_decr_aligned(value, lock) _InterlockedDecrement(value)
#ifdef __PYX_DEBUG_ATOMICS
#warning "Using Intel atomics"
#endif
#else
#undef CYTHON_ATOMICS
#define CYTHON_ATOMICS 0
#ifdef __PYX_DEBUG_ATOMICS
#warning "Not using atomics"
#endif
#endif
typedef volatile __pyx_atomic_int_type __pyx_atomic_int;
#if CYTHON_ATOMICS
#define __pyx_add_acquisition_count(memview)\
    __pyx_atomic_incr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock)
#define __pyx_sub_acquisition_count(memview)\
    __pyx_atomic_decr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock)
#else
#define __pyx_add_acquisition_count(memview)\
    __pyx_add_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock)
#define __pyx_sub_acquisition_count(memview)\
    __pyx_sub_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock)
#endif

/*--- Type declarations ---*/
struct __pyx_obj_9tribeflow_13mycollections_11stamp_lists_StampLists;
struct __pyx_obj_9tribeflow_7kernels_4base_Kernel;
struct __pyx_array_obj;
struct __pyx_MemviewEnum_obj;
struct __pyx_memoryview_obj;
struct __pyx_memoryviewslice_obj;

/* "tribeflow/mycollections/stamp_lists.pxd":10
 * from __future__ import division, print_function
 *
 * cdef class StampLists:             # <<<<<<<<<<<<<<
 *
 *     cdef int[:] limits
 */
struct __pyx_obj_9tribeflow_13mycollections_11stamp_lists_StampLists {
  PyObject_HEAD
  struct __pyx_vtabstruct_9tribeflow_13mycollections_11stamp_lists_StampLists *__pyx_vtab;
  __Pyx_memviewslice limits;
  __Pyx_memviewslice curr_sizes;
  int inc;
  double **values;
};

/* "tribeflow/kernels/base.pxd":12
 * from
tribeflow.mycollections.stamp_lists cimport StampLists * * cdef class Kernel: # <<<<<<<<<<<<<< * * cdef double pdf(self, double x, int z, StampLists stamps) nogil */ struct __pyx_obj_9tribeflow_7kernels_4base_Kernel { PyObject_HEAD struct __pyx_vtabstruct_9tribeflow_7kernels_4base_Kernel *__pyx_vtab; }; /* "View.MemoryView":101 * * @cname("__pyx_array") * cdef class array: # <<<<<<<<<<<<<< * * cdef: */ struct __pyx_array_obj { PyObject_HEAD char *data; Py_ssize_t len; char *format; int ndim; Py_ssize_t *_shape; Py_ssize_t *_strides; Py_ssize_t itemsize; PyObject *mode; PyObject *_format; void (*callback_free_data)(void *); int free_data; int dtype_is_object; }; /* "View.MemoryView":271 * * @cname('__pyx_MemviewEnum') * cdef class Enum(object): # <<<<<<<<<<<<<< * cdef object name * def __init__(self, name): */ struct __pyx_MemviewEnum_obj { PyObject_HEAD PyObject *name; }; /* "View.MemoryView":304 * * @cname('__pyx_memoryview') * cdef class memoryview(object): # <<<<<<<<<<<<<< * * cdef object obj */ struct __pyx_memoryview_obj { PyObject_HEAD struct __pyx_vtabstruct_memoryview *__pyx_vtab; PyObject *obj; PyObject *_size; PyObject *_array_interface; PyThread_type_lock lock; __pyx_atomic_int acquisition_count[2]; __pyx_atomic_int *acquisition_count_aligned_p; Py_buffer view; int flags; int dtype_is_object; __Pyx_TypeInfo *typeinfo; }; /* "View.MemoryView":923 * * @cname('__pyx_memoryviewslice') * cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<< * "Internal class for passing memoryview slices to Python" * */ struct __pyx_memoryviewslice_obj { struct __pyx_memoryview_obj __pyx_base; __Pyx_memviewslice from_slice; PyObject *from_object; PyObject *(*to_object_func)(char *); int (*to_dtype_func)(char *, PyObject *); }; /* "tribeflow/mycollections/stamp_lists.pxd":10 * from __future__ import division, print_function * * cdef class StampLists: # <<<<<<<<<<<<<< * * cdef int[:] limits */ struct __pyx_vtabstruct_9tribeflow_13mycollections_11stamp_lists_StampLists { void (*append)(struct __pyx_obj_9tribeflow_13mycollections_11stamp_lists_StampLists *, int, double); double (*get)(struct __pyx_obj_9tribeflow_13mycollections_11stamp_lists_StampLists *, int, int); double *(*get_all)(struct __pyx_obj_9tribeflow_13mycollections_11stamp_lists_StampLists *, int); int (*size)(struct __pyx_obj_9tribeflow_13mycollections_11stamp_lists_StampLists *, int); void (*clear)(struct __pyx_obj_9tribeflow_13mycollections_11stamp_lists_StampLists *); }; static struct __pyx_vtabstruct_9tribeflow_13mycollections_11stamp_lists_StampLists *__pyx_vtabptr_9tribeflow_13mycollections_11stamp_lists_StampLists; /* "tribeflow/kernels/base.pxd":12 * from tribeflow.mycollections.stamp_lists cimport StampLists * * cdef class Kernel: # <<<<<<<<<<<<<< * * cdef double pdf(self, double x, int z, StampLists stamps) nogil */ struct __pyx_vtabstruct_9tribeflow_7kernels_4base_Kernel { double (*pdf)(struct __pyx_obj_9tribeflow_7kernels_4base_Kernel *, double, int, struct __pyx_obj_9tribeflow_13mycollections_11stamp_lists_StampLists *); void (*mstep)(struct __pyx_obj_9tribeflow_7kernels_4base_Kernel *, struct __pyx_obj_9tribeflow_13mycollections_11stamp_lists_StampLists *); }; static struct __pyx_vtabstruct_9tribeflow_7kernels_4base_Kernel *__pyx_vtabptr_9tribeflow_7kernels_4base_Kernel; /* "View.MemoryView":304 * * @cname('__pyx_memoryview') * cdef class memoryview(object): # <<<<<<<<<<<<<< * * cdef object obj */ struct __pyx_vtabstruct_memoryview { char *(*get_item_pointer)(struct __pyx_memoryview_obj *, PyObject *); PyObject 
*(*is_slice)(struct __pyx_memoryview_obj *, PyObject *); PyObject *(*setitem_slice_assignment)(struct __pyx_memoryview_obj *, PyObject *, PyObject *); PyObject *(*setitem_slice_assign_scalar)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *); PyObject *(*setitem_indexed)(struct __pyx_memoryview_obj *, PyObject *, PyObject *); PyObject *(*convert_item_to_object)(struct __pyx_memoryview_obj *, char *); PyObject *(*assign_item_from_object)(struct __pyx_memoryview_obj *, char *, PyObject *); }; static struct __pyx_vtabstruct_memoryview *__pyx_vtabptr_memoryview; /* "View.MemoryView":923 * * @cname('__pyx_memoryviewslice') * cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<< * "Internal class for passing memoryview slices to Python" * */ struct __pyx_vtabstruct__memoryviewslice { struct __pyx_vtabstruct_memoryview __pyx_base; }; static struct __pyx_vtabstruct__memoryviewslice *__pyx_vtabptr__memoryviewslice; /* --- Runtime support code (head) --- */ #ifndef CYTHON_REFNANNY #define CYTHON_REFNANNY 0 #endif #if CYTHON_REFNANNY typedef struct { void (*INCREF)(void*, PyObject*, int); void (*DECREF)(void*, PyObject*, int); void (*GOTREF)(void*, PyObject*, int); void (*GIVEREF)(void*, PyObject*, int); void* (*SetupContext)(const char*, int, const char*); void (*FinishContext)(void**); } __Pyx_RefNannyAPIStruct; static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; #ifdef WITH_THREAD #define __Pyx_RefNannySetupContext(name, acquire_gil)\ if (acquire_gil) {\ PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ PyGILState_Release(__pyx_gilstate_save);\ } else {\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ } #else #define __Pyx_RefNannySetupContext(name, acquire_gil)\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) #endif #define __Pyx_RefNannyFinishContext()\ __Pyx_RefNanny->FinishContext(&__pyx_refnanny) #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) #else #define __Pyx_RefNannyDeclarations #define __Pyx_RefNannySetupContext(name, acquire_gil) #define __Pyx_RefNannyFinishContext() #define __Pyx_INCREF(r) Py_INCREF(r) #define __Pyx_DECREF(r) Py_DECREF(r) #define __Pyx_GOTREF(r) #define __Pyx_GIVEREF(r) #define __Pyx_XINCREF(r) Py_XINCREF(r) #define __Pyx_XDECREF(r) Py_XDECREF(r) #define __Pyx_XGOTREF(r) #define __Pyx_XGIVEREF(r) #endif #define __Pyx_XDECREF_SET(r, v) do {\ PyObject *tmp = (PyObject *) r;\ r = v; __Pyx_XDECREF(tmp);\ } while (0) #define __Pyx_DECREF_SET(r, v) do {\ PyObject *tmp = (PyObject *) r;\ r = v; __Pyx_DECREF(tmp);\ } while (0) #define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0) #define __Pyx_XCLEAR(r) do { if((r) 
!= NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0) #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) { PyTypeObject* tp = Py_TYPE(obj); if (likely(tp->tp_getattro)) return tp->tp_getattro(obj, attr_name); #if PY_MAJOR_VERSION < 3 if (likely(tp->tp_getattr)) return tp->tp_getattr(obj, PyString_AS_STRING(attr_name)); #endif return PyObject_GetAttr(obj, attr_name); } #else #define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n) #endif static PyObject *__Pyx_GetBuiltinName(PyObject *name); static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\ PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\ const char* function_name); static CYTHON_INLINE int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed, const char *name, int exact); static CYTHON_INLINE int __Pyx_GetBufferAndValidate(Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack); static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info); #define __Pyx_BUF_MAX_NDIMS %(BUF_MAX_NDIMS)d #define __Pyx_MEMVIEW_DIRECT 1 #define __Pyx_MEMVIEW_PTR 2 #define __Pyx_MEMVIEW_FULL 4 #define __Pyx_MEMVIEW_CONTIG 8 #define __Pyx_MEMVIEW_STRIDED 16 #define __Pyx_MEMVIEW_FOLLOW 32 #define __Pyx_IS_C_CONTIG 1 #define __Pyx_IS_F_CONTIG 2 static int __Pyx_init_memviewslice( struct __pyx_memoryview_obj *memview, int ndim, __Pyx_memviewslice *memviewslice, int memview_is_new_reference); static CYTHON_INLINE int __pyx_add_acquisition_count_locked( __pyx_atomic_int *acquisition_count, PyThread_type_lock lock); static CYTHON_INLINE int __pyx_sub_acquisition_count_locked( __pyx_atomic_int *acquisition_count, PyThread_type_lock lock); #define __pyx_get_slice_count_pointer(memview) (memview->acquisition_count_aligned_p) #define __pyx_get_slice_count(memview) (*__pyx_get_slice_count_pointer(memview)) #define __PYX_INC_MEMVIEW(slice, have_gil) __Pyx_INC_MEMVIEW(slice, have_gil, __LINE__) #define __PYX_XDEC_MEMVIEW(slice, have_gil) __Pyx_XDEC_MEMVIEW(slice, have_gil, __LINE__) static CYTHON_INLINE void __Pyx_INC_MEMVIEW(__Pyx_memviewslice *, int, int); static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *, int, int); static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name); #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw); #else #define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw) #endif #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg); #endif static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg); static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb); static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb); static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); #include <string.h> static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals); static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals); #if PY_MAJOR_VERSION >= 3 #define 
__Pyx_PyString_Equals __Pyx_PyUnicode_Equals #else #define __Pyx_PyString_Equals __Pyx_PyBytes_Equals #endif #define UNARY_NEG_WOULD_OVERFLOW(x)\ (((x) < 0) & ((unsigned long)(x) == 0-(unsigned long)(x))) static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static PyObject *get_memview(PyObject *__pyx_v_self); /*proto*/ static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *, PyObject *); static CYTHON_INLINE PyObject* __Pyx_decode_c_string( const char* cstring, Py_ssize_t start, Py_ssize_t stop, const char* encoding, const char* errors, PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)); static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected); static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index); static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void); static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); static CYTHON_INLINE void __Pyx_ExceptionSave(PyObject **type, PyObject **value, PyObject **tb); static void __Pyx_ExceptionReset(PyObject *type, PyObject *value, PyObject *tb); static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb); static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb); static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level); #define __Pyx_GetItemInt(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ __Pyx_GetItemInt_Fast(o, (Py_ssize_t)i, is_list, wraparound, boundscheck) :\ (is_list ? (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL) :\ __Pyx_GetItemInt_Generic(o, to_py_func(i)))) #define __Pyx_GetItemInt_List(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ __Pyx_GetItemInt_List_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\ (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL)) static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, int wraparound, int boundscheck); #define __Pyx_GetItemInt_Tuple(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ __Pyx_GetItemInt_Tuple_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\ (PyErr_SetString(PyExc_IndexError, "tuple index out of range"), (PyObject*)NULL)) static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, int wraparound, int boundscheck); static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j); static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list, int wraparound, int boundscheck); static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static PyObject *__pyx_memoryview_transpose(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview__get__base(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview_get_shape(PyObject *__pyx_v_self); /*proto*/ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE int __Pyx_ListComp_Append(PyObject* list, PyObject* x) { PyListObject* L = (PyListObject*) list; Py_ssize_t len = Py_SIZE(list); if (likely(L->allocated > len)) { Py_INCREF(x); PyList_SET_ITEM(list, len, x); Py_SIZE(list) = len+1; return 0; } return PyList_Append(list, x); } #else #define 
__Pyx_ListComp_Append(L,x) PyList_Append(L,x) #endif static PyObject *__pyx_memoryview_get_strides(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview_get_suboffsets(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview_get_ndim(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview_get_itemsize(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview_get_nbytes(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview_get_size(PyObject *__pyx_v_self); /*proto*/ #if CYTHON_COMPILING_IN_CPYTHON static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, long intval, int inplace); #else #define __Pyx_PyInt_AddObjC(op1, op2, intval, inplace)\ (inplace ? PyNumber_InPlaceAdd(op1, op2) : PyNumber_Add(op1, op2)) #endif static CYTHON_INLINE int __Pyx_PyList_Extend(PyObject* L, PyObject* v) { #if CYTHON_COMPILING_IN_CPYTHON PyObject* none = _PyList_Extend((PyListObject*)L, v); if (unlikely(!none)) return -1; Py_DECREF(none); return 0; #else return PyList_SetSlice(L, PY_SSIZE_T_MAX, PY_SSIZE_T_MAX, v); #endif } #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE int __Pyx_PyList_Append(PyObject* list, PyObject* x) { PyListObject* L = (PyListObject*) list; Py_ssize_t len = Py_SIZE(list); if (likely(L->allocated > len) & likely(len > (L->allocated >> 1))) { Py_INCREF(x); PyList_SET_ITEM(list, len, x); Py_SIZE(list) = len+1; return 0; } return PyList_Append(list, x); } #else #define __Pyx_PyList_Append(L,x) PyList_Append(L,x) #endif static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname); #ifndef __PYX_FORCE_INIT_THREADS #define __PYX_FORCE_INIT_THREADS 0 #endif static PyObject *__pyx_memoryviewslice__get__base(PyObject *__pyx_v_self); /*proto*/ static void __Pyx_WriteUnraisable(const char *name, int clineno, int lineno, const char *filename, int full_traceback, int nogil); static int __Pyx_SetVtable(PyObject *dict, void *vtable); static void* __Pyx_GetVtable(PyObject *dict); typedef struct { int code_line; PyCodeObject* code_object; } __Pyx_CodeObjectCacheEntry; struct __Pyx_CodeObjectCache { int count; int max_count; __Pyx_CodeObjectCacheEntry* entries; }; static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL}; static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line); static PyCodeObject *__pyx_find_code_object(int code_line); static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object); static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename); typedef struct { Py_ssize_t shape, strides, suboffsets; } __Pyx_Buf_DimInfo; typedef struct { size_t refcount; Py_buffer pybuffer; } __Pyx_Buffer; typedef struct { __Pyx_Buffer *rcbuffer; char *data; __Pyx_Buf_DimInfo diminfo[8]; } __Pyx_LocalBuf_ND; #if PY_MAJOR_VERSION < 3 static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags); static void __Pyx_ReleaseBuffer(Py_buffer *view); #else #define __Pyx_GetBuffer PyObject_GetBuffer #define __Pyx_ReleaseBuffer PyBuffer_Release #endif static Py_ssize_t __Pyx_zeros[] = {0, 0, 0, 0, 0, 0, 0, 0}; static Py_ssize_t __Pyx_minusones[] = {-1, -1, -1, -1, -1, -1, -1, -1}; static int __pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b); static int __Pyx_ValidateAndInit_memviewslice( int *axes_specs, int c_or_f_flag, int buf_flags, int ndim, __Pyx_TypeInfo *dtype, __Pyx_BufFmt_StackElem stack[], __Pyx_memviewslice *memviewslice, PyObject *original_obj); static CYTHON_INLINE __Pyx_memviewslice 
__Pyx_PyObject_to_MemoryviewSlice_d_dc_double(PyObject *); static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_dc_int(PyObject *); static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dc_int(PyObject *); static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dc_double(PyObject *); static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *); static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value); static PyObject *__pyx_memview_get_double(const char *itemp); static int __pyx_memview_set_double(const char *itemp, PyObject *obj); static int __pyx_memviewslice_is_contig(const __Pyx_memviewslice *mvs, char order, int ndim); static int __pyx_slices_overlap(__Pyx_memviewslice *slice1, __Pyx_memviewslice *slice2, int ndim, size_t itemsize); static __Pyx_memviewslice __pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs, const char *mode, int ndim, size_t sizeof_dtype, int contig_flag, int dtype_is_object); static CYTHON_INLINE PyObject *__pyx_capsule_create(void *p, const char *sig); static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value); static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *); static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *); static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_ds_double(PyObject *); static int __Pyx_check_binary_version(void); #if !defined(__Pyx_PyIdentifier_FromString) #if PY_MAJOR_VERSION < 3 #define __Pyx_PyIdentifier_FromString(s) PyString_FromString(s) #else #define __Pyx_PyIdentifier_FromString(s) PyUnicode_FromString(s) #endif #endif static PyObject *__Pyx_ImportModule(const char *name); static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict); static int __Pyx_ImportFunction(PyObject *module, const char *funcname, void (**f)(void), const char *sig); static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto*/ static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj); /* proto*/ static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src); /* proto*/ static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value); /* proto*/ static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto*/ static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/ static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/ static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/ static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/ /* Module declarations from 'cpython.version' */ /* Module declarations from '__builtin__' */ /* Module declarations from 'cpython.type' */ static PyTypeObject *__pyx_ptype_7cpython_4type_type = 0; /* Module declarations from 'libc.string' */ /* Module 
declarations from 'libc.stdio' */ /* Module declarations from 'cpython.object' */ /* Module declarations from 'cpython.ref' */ /* Module declarations from 'cpython.exc' */ /* Module declarations from 'cpython.module' */ /* Module declarations from 'cpython.mem' */ /* Module declarations from 'cpython.tuple' */ /* Module declarations from 'cpython.list' */ /* Module declarations from 'cpython.sequence' */ /* Module declarations from 'cpython.mapping' */ /* Module declarations from 'cpython.iterator' */ /* Module declarations from 'cpython.number' */ /* Module declarations from 'cpython.int' */ /* Module declarations from '__builtin__' */ /* Module declarations from 'cpython.bool' */ static PyTypeObject *__pyx_ptype_7cpython_4bool_bool = 0; /* Module declarations from 'cpython.long' */ /* Module declarations from 'cpython.float' */ /* Module declarations from '__builtin__' */ /* Module declarations from 'cpython.complex' */ static PyTypeObject *__pyx_ptype_7cpython_7complex_complex = 0; /* Module declarations from 'cpython.string' */ /* Module declarations from 'cpython.unicode' */ /* Module declarations from 'cpython.dict' */ /* Module declarations from 'cpython.instance' */ /* Module declarations from 'cpython.function' */ /* Module declarations from 'cpython.method' */ /* Module declarations from 'cpython.weakref' */ /* Module declarations from 'cpython.getargs' */ /* Module declarations from 'cpython.pythread' */ /* Module declarations from 'cpython.pystate' */ /* Module declarations from 'cpython.cobject' */ /* Module declarations from 'cpython.oldbuffer' */ /* Module declarations from 'cpython.set' */ /* Module declarations from 'cpython.buffer' */ /* Module declarations from 'cpython.bytes' */ /* Module declarations from 'cpython.pycapsule' */ /* Module declarations from 'cpython' */ /* Module declarations from 'tribeflow' */ /* Module declarations from 'tribeflow._learn' */ static CYTHON_INLINE double (*__pyx_f_9tribeflow_6_learn_dir_posterior)(double, double, double, double); /*proto*/ /* Module declarations from 'tribeflow.mycollections.stamp_lists' */ static PyTypeObject *__pyx_ptype_9tribeflow_13mycollections_11stamp_lists_StampLists = 0; /* Module declarations from 'tribeflow.kernels.base' */ static PyTypeObject *__pyx_ptype_9tribeflow_7kernels_4base_Kernel = 0; /* Module declarations from 'tribeflow._eval' */ static PyTypeObject *__pyx_array_type = 0; static PyTypeObject *__pyx_MemviewEnum_type = 0; static PyTypeObject *__pyx_memoryview_type = 0; static PyTypeObject *__pyx_memoryviewslice_type = 0; static PyObject *generic = 0; static PyObject *strided = 0; static PyObject *indirect = 0; static PyObject *contiguous = 0; static PyObject *indirect_contiguous = 0; static struct __pyx_array_obj *__pyx_array_new(PyObject *, Py_ssize_t, char *, char *, char *); /*proto*/ static void *__pyx_align_pointer(void *, size_t); /*proto*/ static PyObject *__pyx_memoryview_new(PyObject *, int, int, __Pyx_TypeInfo *); /*proto*/ static CYTHON_INLINE int __pyx_memoryview_check(PyObject *); /*proto*/ static PyObject *_unellipsify(PyObject *, int); /*proto*/ static PyObject *assert_direct_dimensions(Py_ssize_t *, int); /*proto*/ static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *, PyObject *); /*proto*/ static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int, int); /*proto*/ static char *__pyx_pybuffer_index(Py_buffer *, char *, Py_ssize_t, 
Py_ssize_t); /*proto*/ static int __pyx_memslice_transpose(__Pyx_memviewslice *); /*proto*/ static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice, int, PyObject *(*)(char *), int (*)(char *, PyObject *), int); /*proto*/ static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *); /*proto*/ static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ static Py_ssize_t abs_py_ssize_t(Py_ssize_t); /*proto*/ static char __pyx_get_best_slice_order(__Pyx_memviewslice *, int); /*proto*/ static void _copy_strided_to_strided(char *, Py_ssize_t *, char *, Py_ssize_t *, Py_ssize_t *, Py_ssize_t *, int, size_t); /*proto*/ static void copy_strided_to_strided(__Pyx_memviewslice *, __Pyx_memviewslice *, int, size_t); /*proto*/ static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *, int); /*proto*/ static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *, Py_ssize_t *, Py_ssize_t, int, char); /*proto*/ static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *, __Pyx_memviewslice *, char, int); /*proto*/ static int __pyx_memoryview_err_extents(int, Py_ssize_t, Py_ssize_t); /*proto*/ static int __pyx_memoryview_err_dim(PyObject *, char *, int); /*proto*/ static int __pyx_memoryview_err(PyObject *, char *); /*proto*/ static int __pyx_memoryview_copy_contents(__Pyx_memviewslice, __Pyx_memviewslice, int, int, int); /*proto*/ static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *, int, int); /*proto*/ static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *, int, int, int); /*proto*/ static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/ static void __pyx_memoryview_refcount_objects_in_slice(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/ static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *, int, size_t, void *, int); /*proto*/ static void __pyx_memoryview__slice_assign_scalar(char *, Py_ssize_t *, Py_ssize_t *, int, size_t, void *); /*proto*/ static __Pyx_TypeInfo __Pyx_TypeInfo_double = { "double", NULL, sizeof(double), { 0 }, 0, 'R', 0, 0 }; static __Pyx_TypeInfo __Pyx_TypeInfo_int = { "int", NULL, sizeof(int), { 0 }, 0, IS_UNSIGNED(int) ? 
'U' : 'I', IS_UNSIGNED(int), 0 }; #define __Pyx_MODULE_NAME "tribeflow._eval" int __pyx_module_is_main_tribeflow___eval = 0; /* Implementation of 'tribeflow._eval' */ static PyObject *__pyx_builtin_xrange; static PyObject *__pyx_builtin_ValueError; static PyObject *__pyx_builtin_MemoryError; static PyObject *__pyx_builtin_enumerate; static PyObject *__pyx_builtin_range; static PyObject *__pyx_builtin_Ellipsis; static PyObject *__pyx_builtin_TypeError; static PyObject *__pyx_builtin_id; static PyObject *__pyx_builtin_IndexError; static char __pyx_k_O[] = "O"; static char __pyx_k_c[] = "c"; static char __pyx_k_d[] = "d"; static char __pyx_k_h[] = "h"; static char __pyx_k_i[] = "i"; static char __pyx_k_j[] = "j"; static char __pyx_k_o[] = "o"; static char __pyx_k_p[] = "p"; static char __pyx_k_s[] = "s"; static char __pyx_k_z[] = "z"; static char __pyx_k_dt[] = "dt"; static char __pyx_k_i4[] = "i4"; static char __pyx_k_id[] = "id"; static char __pyx_k_np[] = "np"; static char __pyx_k_ns[] = "ns"; static char __pyx_k_nz[] = "nz"; static char __pyx_k_Dts[] = "Dts"; static char __pyx_k_HOs[] = "HOs"; static char __pyx_k_idx[] = "idx"; static char __pyx_k_mem[] = "mem"; static char __pyx_k_obj[] = "obj"; static char __pyx_k_rrs[] = "rrs"; static char __pyx_k_base[] = "base"; static char __pyx_k_main[] = "__main__"; static char __pyx_k_mode[] = "mode"; static char __pyx_k_name[] = "name"; static char __pyx_k_ndim[] = "ndim"; static char __pyx_k_pack[] = "pack"; static char __pyx_k_size[] = "size"; static char __pyx_k_step[] = "step"; static char __pyx_k_stop[] = "stop"; static char __pyx_k_test[] = "__test__"; static char __pyx_k_ASCII[] = "ASCII"; static char __pyx_k_Trace[] = "Trace"; static char __pyx_k_array[] = "array"; static char __pyx_k_class[] = "__class__"; static char __pyx_k_dtype[] = "dtype"; static char __pyx_k_error[] = "error"; static char __pyx_k_flags[] = "flags"; static char __pyx_k_numpy[] = "numpy"; static char __pyx_k_range[] = "range"; static char __pyx_k_shape[] = "shape"; static char __pyx_k_start[] = "start"; static char __pyx_k_zeros[] = "zeros"; static char __pyx_k_Psi_sz[] = "Psi_sz"; static char __pyx_k_encode[] = "encode"; static char __pyx_k_format[] = "format"; static char __pyx_k_import[] = "__import__"; static char __pyx_k_kernel[] = "kernel"; static char __pyx_k_last_o[] = "last_o"; static char __pyx_k_name_2[] = "__name__"; static char __pyx_k_real_o[] = "real_o"; static char __pyx_k_struct[] = "struct"; static char __pyx_k_unpack[] = "unpack"; static char __pyx_k_xrange[] = "xrange"; static char __pyx_k_beta_zs[] = "beta_zs"; static char __pyx_k_count_h[] = "count_h"; static char __pyx_k_count_z[] = "count_z"; static char __pyx_k_fortran[] = "fortran"; static char __pyx_k_memview[] = "memview"; static char __pyx_k_Count_sz[] = "Count_sz"; static char __pyx_k_Count_zh[] = "Count_zh"; static char __pyx_k_Ellipsis[] = "Ellipsis"; static char __pyx_k_Theta_zh[] = "Theta_zh"; static char __pyx_k_alpha_zh[] = "alpha_zh"; static char __pyx_k_itemsize[] = "itemsize"; static char __pyx_k_ll_per_z[] = "ll_per_z"; static char __pyx_k_TypeError[] = "TypeError"; static char __pyx_k_enumerate[] = "enumerate"; static char __pyx_k_IndexError[] = "IndexError"; static char __pyx_k_ValueError[] = "ValueError"; static char __pyx_k_mem_factor[] = "mem_factor"; static char __pyx_k_pyx_vtable[] = "__pyx_vtable__"; static char __pyx_k_MemoryError[] = "MemoryError"; static char __pyx_k_candidate_o[] = "candidate_o"; static char __pyx_k_predictions[] = "predictions"; static char 
__pyx_k_return_probs[] = "return_probs"; static char __pyx_k_pyx_getbuffer[] = "__pyx_getbuffer"; static char __pyx_k_allocate_buffer[] = "allocate_buffer"; static char __pyx_k_dtype_is_object[] = "dtype_is_object"; static char __pyx_k_previous_stamps[] = "previous_stamps"; static char __pyx_k_reciprocal_rank[] = "reciprocal_rank"; static char __pyx_k_tribeflow__eval[] = "tribeflow._eval"; static char __pyx_k_quality_estimate[] = "quality_estimate"; static char __pyx_k_strided_and_direct[] = "<strided and direct>"; static char __pyx_k_strided_and_indirect[] = "<strided and indirect>"; static char __pyx_k_contiguous_and_direct[] = "<contiguous and direct>"; static char __pyx_k_MemoryView_of_r_object[] = "<MemoryView of %r object>"; static char __pyx_k_MemoryView_of_r_at_0x_x[] = "<MemoryView of %r at 0x%x>"; static char __pyx_k_contiguous_and_indirect[] = "<contiguous and indirect>"; static char __pyx_k_Cannot_index_with_type_s[] = "Cannot index with type '%s'"; static char __pyx_k_getbuffer_obj_view_flags[] = "getbuffer(obj, view, flags)"; static char __pyx_k_Dimension_d_is_not_direct[] = "Dimension %d is not direct"; static char __pyx_k_Invalid_shape_in_axis_d_d[] = "Invalid shape in axis %d: %d."; static char __pyx_k_Index_out_of_bounds_axis_d[] = "Index out of bounds (axis %d)"; static char __pyx_k_Step_may_not_be_zero_axis_d[] = "Step may not be zero (axis %d)"; static char __pyx_k_itemsize_0_for_cython_array[] = "itemsize <= 0 for cython.array"; static char __pyx_k_unable_to_allocate_array_data[] = "unable to allocate array data."; static char __pyx_k_home_mohame11_outlierDetection[] = "/home/mohame11/outlierDetection_gitRepo/Detecting-anomalies-in-user-trajectories/tribeflow/_eval.pyx"; static char __pyx_k_strided_and_direct_or_indirect[] = "<strided and direct or indirect>"; static char __pyx_k_All_dimensions_preceding_dimensi[] = "All dimensions preceding dimension %d must be indexed and not sliced"; static char __pyx_k_Buffer_view_does_not_expose_stri[] = "Buffer view does not expose strides"; static char __pyx_k_Can_only_create_a_buffer_that_is[] = "Can only create a buffer that is contiguous in memory."; static char __pyx_k_Cannot_transpose_memoryview_with[] = "Cannot transpose memoryview with indirect dimensions"; static char __pyx_k_Empty_shape_tuple_for_cython_arr[] = "Empty shape tuple for cython.array"; static char __pyx_k_Indirect_dimensions_not_supporte[] = "Indirect dimensions not supported"; static char __pyx_k_Invalid_mode_expected_c_or_fortr[] = "Invalid mode, expected 'c' or 'fortran', got %s"; static char __pyx_k_Out_of_bounds_on_buffer_access_a[] = "Out of bounds on buffer access (axis %d)"; static char __pyx_k_Unable_to_convert_item_to_object[] = "Unable to convert item to object"; static char __pyx_k_got_differing_extents_in_dimensi[] = "got differing extents in dimension %d (got %d and %d)"; static char __pyx_k_unable_to_allocate_shape_and_str[] = "unable to allocate shape and strides."; static PyObject *__pyx_n_s_ASCII; static PyObject *__pyx_kp_s_Buffer_view_does_not_expose_stri; static PyObject *__pyx_kp_s_Can_only_create_a_buffer_that_is; static PyObject *__pyx_kp_s_Cannot_index_with_type_s; static PyObject *__pyx_n_s_Count_sz; static PyObject *__pyx_n_s_Count_zh; static PyObject *__pyx_n_s_Dts; static PyObject *__pyx_n_s_Ellipsis; static PyObject *__pyx_kp_s_Empty_shape_tuple_for_cython_arr; static PyObject *__pyx_n_s_HOs; static PyObject *__pyx_n_s_IndexError; static PyObject *__pyx_kp_s_Indirect_dimensions_not_supporte; static PyObject 
*__pyx_kp_s_Invalid_mode_expected_c_or_fortr; static PyObject *__pyx_kp_s_Invalid_shape_in_axis_d_d; static PyObject *__pyx_n_s_MemoryError; static PyObject *__pyx_kp_s_MemoryView_of_r_at_0x_x; static PyObject *__pyx_kp_s_MemoryView_of_r_object; static PyObject *__pyx_n_b_O; static PyObject *__pyx_kp_s_Out_of_bounds_on_buffer_access_a; static PyObject *__pyx_n_s_Psi_sz; static PyObject *__pyx_n_s_Theta_zh; static PyObject *__pyx_n_s_Trace; static PyObject *__pyx_n_s_TypeError; static PyObject *__pyx_kp_s_Unable_to_convert_item_to_object; static PyObject *__pyx_n_s_ValueError; static PyObject *__pyx_n_s_allocate_buffer; static PyObject *__pyx_n_s_alpha_zh; static PyObject *__pyx_n_s_array; static PyObject *__pyx_n_s_base; static PyObject *__pyx_n_s_beta_zs; static PyObject *__pyx_n_s_c; static PyObject *__pyx_n_u_c; static PyObject *__pyx_n_s_candidate_o; static PyObject *__pyx_n_s_class; static PyObject *__pyx_kp_s_contiguous_and_direct; static PyObject *__pyx_kp_s_contiguous_and_indirect; static PyObject *__pyx_n_s_count_h; static PyObject *__pyx_n_s_count_z; static PyObject *__pyx_n_s_d; static PyObject *__pyx_n_s_dt; static PyObject *__pyx_n_s_dtype; static PyObject *__pyx_n_s_dtype_is_object; static PyObject *__pyx_n_s_encode; static PyObject *__pyx_n_s_enumerate; static PyObject *__pyx_n_s_error; static PyObject *__pyx_n_s_flags; static PyObject *__pyx_n_s_format; static PyObject *__pyx_n_s_fortran; static PyObject *__pyx_n_u_fortran; static PyObject *__pyx_kp_s_got_differing_extents_in_dimensi; static PyObject *__pyx_n_s_h; static PyObject *__pyx_kp_s_home_mohame11_outlierDetection; static PyObject *__pyx_n_s_i; static PyObject *__pyx_n_s_i4; static PyObject *__pyx_n_s_id; static PyObject *__pyx_n_s_idx; static PyObject *__pyx_n_s_import; static PyObject *__pyx_n_s_itemsize; static PyObject *__pyx_kp_s_itemsize_0_for_cython_array; static PyObject *__pyx_n_s_j; static PyObject *__pyx_n_s_kernel; static PyObject *__pyx_n_s_last_o; static PyObject *__pyx_n_s_ll_per_z; static PyObject *__pyx_n_s_main; static PyObject *__pyx_n_s_mem; static PyObject *__pyx_n_s_mem_factor; static PyObject *__pyx_n_s_memview; static PyObject *__pyx_n_s_mode; static PyObject *__pyx_n_s_name; static PyObject *__pyx_n_s_name_2; static PyObject *__pyx_n_s_ndim; static PyObject *__pyx_n_s_np; static PyObject *__pyx_n_s_ns; static PyObject *__pyx_n_s_numpy; static PyObject *__pyx_n_s_nz; static PyObject *__pyx_n_s_o; static PyObject *__pyx_n_s_obj; static PyObject *__pyx_n_s_p; static PyObject *__pyx_n_s_pack; static PyObject *__pyx_n_s_predictions; static PyObject *__pyx_n_s_previous_stamps; static PyObject *__pyx_n_s_pyx_getbuffer; static PyObject *__pyx_n_s_pyx_vtable; static PyObject *__pyx_n_s_quality_estimate; static PyObject *__pyx_n_s_range; static PyObject *__pyx_n_s_real_o; static PyObject *__pyx_n_s_reciprocal_rank; static PyObject *__pyx_n_s_return_probs; static PyObject *__pyx_n_s_rrs; static PyObject *__pyx_n_s_s; static PyObject *__pyx_n_s_shape; static PyObject *__pyx_n_s_size; static PyObject *__pyx_n_s_start; static PyObject *__pyx_n_s_step; static PyObject *__pyx_n_s_stop; static PyObject *__pyx_kp_s_strided_and_direct; static PyObject *__pyx_kp_s_strided_and_direct_or_indirect; static PyObject *__pyx_kp_s_strided_and_indirect; static PyObject *__pyx_n_s_struct; static PyObject *__pyx_n_s_test; static PyObject *__pyx_n_s_tribeflow__eval; static PyObject *__pyx_kp_s_unable_to_allocate_array_data; static PyObject *__pyx_kp_s_unable_to_allocate_shape_and_str; static PyObject *__pyx_n_s_unpack; 
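/* Illustrative note, not emitted by Cython: every __pyx_k_* char array above is
   paired with one of these interned-object slots through a __Pyx_StringTabEntry
   row (struct defined earlier in this file), and __Pyx_InitStrings() walks that
   table once at module init. A hypothetical excerpt of such a table row:
       {&__pyx_n_s_kernel, __pyx_k_kernel, sizeof(__pyx_k_kernel), 0, 0, 1, 1},
   After interning, the generated code can compare attribute and keyword names
   by pointer identity instead of by character-by-character value. */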
static PyObject *__pyx_n_s_xrange; static PyObject *__pyx_n_s_z; static PyObject *__pyx_n_s_zeros; static PyObject *__pyx_pf_9tribeflow_5_eval_quality_estimate(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_Dts, __Pyx_memviewslice __pyx_v_Trace, struct __pyx_obj_9tribeflow_13mycollections_11stamp_lists_StampLists *__pyx_v_previous_stamps, __Pyx_memviewslice __pyx_v_Count_zh, __Pyx_memviewslice __pyx_v_Count_sz, __Pyx_memviewslice __pyx_v_count_h, __Pyx_memviewslice __pyx_v_count_z, double __pyx_v_alpha_zh, double __pyx_v_beta_zs, __Pyx_memviewslice __pyx_v_ll_per_z, __Pyx_memviewslice __pyx_v_idx, struct __pyx_obj_9tribeflow_7kernels_4base_Kernel *__pyx_v_kernel); /* proto */ static PyObject *__pyx_pf_9tribeflow_5_eval_2reciprocal_rank(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_Dts, __Pyx_memviewslice __pyx_v_HOs, struct __pyx_obj_9tribeflow_13mycollections_11stamp_lists_StampLists *__pyx_v_previous_stamps, __Pyx_memviewslice __pyx_v_Theta_zh, __Pyx_memviewslice __pyx_v_Psi_sz, CYTHON_UNUSED __Pyx_memviewslice __pyx_v_count_z, struct __pyx_obj_9tribeflow_7kernels_4base_Kernel *__pyx_v_kernel, PyBoolObject *__pyx_v_return_probs); /* proto */ static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer); /* proto */ static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr); /* proto */ static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item); /* proto */ static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /* proto */ static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name); /* proto */ static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */ static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object); /* proto */ static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto */ static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto */ static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ 
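/* Illustrative sketch, not emitted by Cython: the __Pyx_memviewslice arguments
   of __pyx_pf_9tribeflow_5_eval_quality_estimate and
   __pyx_pf_9tribeflow_5_eval_2reciprocal_rank above are indexed with plain
   stride arithmetic. This hypothetical helper mirrors the access pattern the
   generated bodies use for mv[i, j] on a direct (non-indirect) 2-D slice of
   doubles: */
static CYTHON_UNUSED double __pyx_demo_mv_get2d(const __Pyx_memviewslice *mv,
                                                Py_ssize_t i, Py_ssize_t j) {
    /* strides are expressed in bytes, hence the char* arithmetic */
    char *p = mv->data + i * mv->strides[0] + j * mv->strides[1];
    return *(double *) p;
}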
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_int_0; static PyObject *__pyx_int_1; static PyObject *__pyx_int_neg_1; static PyObject *__pyx_tuple_; static PyObject *__pyx_tuple__2; static PyObject *__pyx_tuple__3; static PyObject *__pyx_tuple__4; static PyObject *__pyx_tuple__5; static PyObject *__pyx_tuple__6; static PyObject *__pyx_tuple__7; static PyObject *__pyx_tuple__8; static PyObject *__pyx_tuple__9; static PyObject *__pyx_slice__10; static PyObject *__pyx_slice__11; static PyObject *__pyx_slice__12; static PyObject *__pyx_tuple__13; static PyObject *__pyx_tuple__14; static PyObject *__pyx_tuple__16; static PyObject *__pyx_tuple__18; static PyObject *__pyx_tuple__19; static 
PyObject *__pyx_tuple__20; static PyObject *__pyx_tuple__21; static PyObject *__pyx_tuple__22; static PyObject *__pyx_codeobj__15; static PyObject *__pyx_codeobj__17; /* "tribeflow/_eval.pyx":21 * double log(double) nogil * * def quality_estimate(double[:,::1] Dts, int[:,::1] Trace, \ # <<<<<<<<<<<<<< * StampLists previous_stamps, int[:,::1] Count_zh, int[:,::1] Count_sz, \ * int[::1] count_h, int[::1] count_z, double alpha_zh, double beta_zs, \ */ /* Python wrapper */ static PyObject *__pyx_pw_9tribeflow_5_eval_1quality_estimate(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_9tribeflow_5_eval_1quality_estimate = {"quality_estimate", (PyCFunction)__pyx_pw_9tribeflow_5_eval_1quality_estimate, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_9tribeflow_5_eval_1quality_estimate(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { __Pyx_memviewslice __pyx_v_Dts = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_Trace = { 0, 0, { 0 }, { 0 }, { 0 } }; struct __pyx_obj_9tribeflow_13mycollections_11stamp_lists_StampLists *__pyx_v_previous_stamps = 0; __Pyx_memviewslice __pyx_v_Count_zh = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_Count_sz = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_count_h = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_count_z = { 0, 0, { 0 }, { 0 }, { 0 } }; double __pyx_v_alpha_zh; double __pyx_v_beta_zs; __Pyx_memviewslice __pyx_v_ll_per_z = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_idx = { 0, 0, { 0 }, { 0 }, { 0 } }; struct __pyx_obj_9tribeflow_7kernels_4base_Kernel *__pyx_v_kernel = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("quality_estimate (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_Dts,&__pyx_n_s_Trace,&__pyx_n_s_previous_stamps,&__pyx_n_s_Count_zh,&__pyx_n_s_Count_sz,&__pyx_n_s_count_h,&__pyx_n_s_count_z,&__pyx_n_s_alpha_zh,&__pyx_n_s_beta_zs,&__pyx_n_s_ll_per_z,&__pyx_n_s_idx,&__pyx_n_s_kernel,0}; PyObject* values[12] = {0,0,0,0,0,0,0,0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 12: values[11] = PyTuple_GET_ITEM(__pyx_args, 11); case 11: values[10] = PyTuple_GET_ITEM(__pyx_args, 10); case 10: values[9] = PyTuple_GET_ITEM(__pyx_args, 9); case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8); case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_Dts)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_Trace)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("quality_estimate", 1, 12, 12, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 21; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_previous_stamps)) 
!= 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("quality_estimate", 1, 12, 12, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 21; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_Count_zh)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("quality_estimate", 1, 12, 12, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 21; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 4: if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_Count_sz)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("quality_estimate", 1, 12, 12, 4); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 21; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 5: if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_count_h)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("quality_estimate", 1, 12, 12, 5); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 21; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 6: if (likely((values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_count_z)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("quality_estimate", 1, 12, 12, 6); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 21; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 7: if (likely((values[7] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_alpha_zh)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("quality_estimate", 1, 12, 12, 7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 21; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 8: if (likely((values[8] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_beta_zs)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("quality_estimate", 1, 12, 12, 8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 21; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 9: if (likely((values[9] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_ll_per_z)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("quality_estimate", 1, 12, 12, 9); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 21; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 10: if (likely((values[10] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_idx)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("quality_estimate", 1, 12, 12, 10); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 21; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 11: if (likely((values[11] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_kernel)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("quality_estimate", 1, 12, 12, 11); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 21; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "quality_estimate") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 21; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 12) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[5] = PyTuple_GET_ITEM(__pyx_args, 5); values[6] = PyTuple_GET_ITEM(__pyx_args, 6); values[7] = PyTuple_GET_ITEM(__pyx_args, 7); values[8] = PyTuple_GET_ITEM(__pyx_args, 8); values[9] = PyTuple_GET_ITEM(__pyx_args, 9); values[10] = PyTuple_GET_ITEM(__pyx_args, 10); values[11] = PyTuple_GET_ITEM(__pyx_args, 11); } __pyx_v_Dts = __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(values[0]); if 
(unlikely(!__pyx_v_Dts.memview)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 21; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_Trace = __Pyx_PyObject_to_MemoryviewSlice_d_dc_int(values[1]); if (unlikely(!__pyx_v_Trace.memview)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 21; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_previous_stamps = ((struct __pyx_obj_9tribeflow_13mycollections_11stamp_lists_StampLists *)values[2]); __pyx_v_Count_zh = __Pyx_PyObject_to_MemoryviewSlice_d_dc_int(values[3]); if (unlikely(!__pyx_v_Count_zh.memview)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 22; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_Count_sz = __Pyx_PyObject_to_MemoryviewSlice_d_dc_int(values[4]); if (unlikely(!__pyx_v_Count_sz.memview)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 22; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_count_h = __Pyx_PyObject_to_MemoryviewSlice_dc_int(values[5]); if (unlikely(!__pyx_v_count_h.memview)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 23; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_count_z = __Pyx_PyObject_to_MemoryviewSlice_dc_int(values[6]); if (unlikely(!__pyx_v_count_z.memview)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 23; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_alpha_zh = __pyx_PyFloat_AsDouble(values[7]); if (unlikely((__pyx_v_alpha_zh == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 23; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_beta_zs = __pyx_PyFloat_AsDouble(values[8]); if (unlikely((__pyx_v_beta_zs == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 23; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_ll_per_z = __Pyx_PyObject_to_MemoryviewSlice_dc_double(values[9]); if (unlikely(!__pyx_v_ll_per_z.memview)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 24; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_idx = __Pyx_PyObject_to_MemoryviewSlice_dc_int(values[10]); if (unlikely(!__pyx_v_idx.memview)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 24; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_kernel = ((struct __pyx_obj_9tribeflow_7kernels_4base_Kernel *)values[11]); } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("quality_estimate", 1, 12, 12, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 21; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("tribeflow._eval.quality_estimate", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_previous_stamps), __pyx_ptype_9tribeflow_13mycollections_11stamp_lists_StampLists, 1, "previous_stamps", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 22; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_kernel), __pyx_ptype_9tribeflow_7kernels_4base_Kernel, 1, "kernel", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 24; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = __pyx_pf_9tribeflow_5_eval_quality_estimate(__pyx_self, __pyx_v_Dts, __pyx_v_Trace, __pyx_v_previous_stamps, __pyx_v_Count_zh, __pyx_v_Count_sz, __pyx_v_count_h, __pyx_v_count_z, __pyx_v_alpha_zh, __pyx_v_beta_zs, __pyx_v_ll_per_z, __pyx_v_idx, __pyx_v_kernel); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return 
__pyx_r; } static PyObject *__pyx_pf_9tribeflow_5_eval_quality_estimate(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_Dts, __Pyx_memviewslice __pyx_v_Trace, struct __pyx_obj_9tribeflow_13mycollections_11stamp_lists_StampLists *__pyx_v_previous_stamps, __Pyx_memviewslice __pyx_v_Count_zh, __Pyx_memviewslice __pyx_v_Count_sz, __Pyx_memviewslice __pyx_v_count_h, __Pyx_memviewslice __pyx_v_count_z, double __pyx_v_alpha_zh, double __pyx_v_beta_zs, __Pyx_memviewslice __pyx_v_ll_per_z, __Pyx_memviewslice __pyx_v_idx, struct __pyx_obj_9tribeflow_7kernels_4base_Kernel *__pyx_v_kernel) { int __pyx_v_nz; int __pyx_v_ns; int __pyx_v_i; int __pyx_v_h; int __pyx_v_z; int __pyx_v_o; double __pyx_v_dt; Py_ssize_t __pyx_v_j; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; int __pyx_t_2; Py_ssize_t __pyx_t_3; Py_ssize_t __pyx_t_4; Py_ssize_t __pyx_t_5; Py_ssize_t __pyx_t_6; Py_ssize_t __pyx_t_7; Py_ssize_t __pyx_t_8; Py_ssize_t __pyx_t_9; Py_ssize_t __pyx_t_10; Py_ssize_t __pyx_t_11; Py_ssize_t __pyx_t_12; Py_ssize_t __pyx_t_13; Py_ssize_t __pyx_t_14; Py_ssize_t __pyx_t_15; Py_ssize_t __pyx_t_16; Py_ssize_t __pyx_t_17; Py_ssize_t __pyx_t_18; Py_ssize_t __pyx_t_19; Py_ssize_t __pyx_t_20; Py_ssize_t __pyx_t_21; Py_ssize_t __pyx_t_22; Py_ssize_t __pyx_t_23; __Pyx_RefNannySetupContext("quality_estimate", 0); /* "tribeflow/_eval.pyx":26 * double[::1] ll_per_z, int[::1] idx, Kernel kernel): * * cdef int nz = Count_zh.shape[0] # <<<<<<<<<<<<<< * cdef int ns = Count_sz.shape[0] * */ __pyx_v_nz = (__pyx_v_Count_zh.shape[0]); /* "tribeflow/_eval.pyx":27 * * cdef int nz = Count_zh.shape[0] * cdef int ns = Count_sz.shape[0] # <<<<<<<<<<<<<< * * cdef int i = 0 */ __pyx_v_ns = (__pyx_v_Count_sz.shape[0]); /* "tribeflow/_eval.pyx":29 * cdef int ns = Count_sz.shape[0] * * cdef int i = 0 # <<<<<<<<<<<<<< * cdef int h, z, o = 0 * cdef double dt = 0 */ __pyx_v_i = 0; /* "tribeflow/_eval.pyx":30 * * cdef int i = 0 * cdef int h, z, o = 0 # <<<<<<<<<<<<<< * cdef double dt = 0 * */ __pyx_v_o = 0; /* "tribeflow/_eval.pyx":31 * cdef int i = 0 * cdef int h, z, o = 0 * cdef double dt = 0 # <<<<<<<<<<<<<< * * for i in xrange(idx.shape[0]): */ __pyx_v_dt = 0.0; /* "tribeflow/_eval.pyx":33 * cdef double dt = 0 * * for i in xrange(idx.shape[0]): # <<<<<<<<<<<<<< * dt = Dts[idx[i], Dts.shape[1] - 1] * h = Trace[idx[i], 0] */ __pyx_t_1 = (__pyx_v_idx.shape[0]); for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_1; __pyx_t_2+=1) { __pyx_v_i = __pyx_t_2; /* "tribeflow/_eval.pyx":34 * * for i in xrange(idx.shape[0]): * dt = Dts[idx[i], Dts.shape[1] - 1] # <<<<<<<<<<<<<< * h = Trace[idx[i], 0] * z = Trace[idx[i], Trace.shape[1] - 1] */ __pyx_t_3 = __pyx_v_i; __pyx_t_4 = (*((int *) ( /* dim=0 */ ((char *) (((int *) __pyx_v_idx.data) + __pyx_t_3)) ))); __pyx_t_5 = ((__pyx_v_Dts.shape[1]) - 1); __pyx_v_dt = (*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_Dts.data + __pyx_t_4 * __pyx_v_Dts.strides[0]) )) + __pyx_t_5)) ))); /* "tribeflow/_eval.pyx":35 * for i in xrange(idx.shape[0]): * dt = Dts[idx[i], Dts.shape[1] - 1] * h = Trace[idx[i], 0] # <<<<<<<<<<<<<< * z = Trace[idx[i], Trace.shape[1] - 1] * for j in xrange(1, Trace.shape[1] - 1): */ __pyx_t_6 = __pyx_v_i; __pyx_t_7 = (*((int *) ( /* dim=0 */ ((char *) (((int *) __pyx_v_idx.data) + __pyx_t_6)) ))); __pyx_t_8 = 0; __pyx_v_h = (*((int *) ( /* dim=1 */ ((char *) (((int *) ( /* dim=0 */ (__pyx_v_Trace.data + __pyx_t_7 * __pyx_v_Trace.strides[0]) )) + __pyx_t_8)) ))); /* "tribeflow/_eval.pyx":36 * dt = Dts[idx[i], Dts.shape[1] - 1] * h 
= Trace[idx[i], 0] * z = Trace[idx[i], Trace.shape[1] - 1] # <<<<<<<<<<<<<< * for j in xrange(1, Trace.shape[1] - 1): * o = Trace[i, j] */ __pyx_t_9 = __pyx_v_i; __pyx_t_10 = (*((int *) ( /* dim=0 */ ((char *) (((int *) __pyx_v_idx.data) + __pyx_t_9)) ))); __pyx_t_11 = ((__pyx_v_Trace.shape[1]) - 1); __pyx_v_z = (*((int *) ( /* dim=1 */ ((char *) (((int *) ( /* dim=0 */ (__pyx_v_Trace.data + __pyx_t_10 * __pyx_v_Trace.strides[0]) )) + __pyx_t_11)) ))); /* "tribeflow/_eval.pyx":37 * h = Trace[idx[i], 0] * z = Trace[idx[i], Trace.shape[1] - 1] * for j in xrange(1, Trace.shape[1] - 1): # <<<<<<<<<<<<<< * o = Trace[i, j] * ll_per_z[z] += \ */ __pyx_t_12 = ((__pyx_v_Trace.shape[1]) - 1); for (__pyx_t_13 = 1; __pyx_t_13 < __pyx_t_12; __pyx_t_13+=1) { __pyx_v_j = __pyx_t_13; /* "tribeflow/_eval.pyx":38 * z = Trace[idx[i], Trace.shape[1] - 1] * for j in xrange(1, Trace.shape[1] - 1): * o = Trace[i, j] # <<<<<<<<<<<<<< * ll_per_z[z] += \ * log(_learn.dir_posterior(Count_sz[o, z], count_z[z], ns, \ */ __pyx_t_14 = __pyx_v_i; __pyx_t_15 = __pyx_v_j; __pyx_v_o = (*((int *) ( /* dim=1 */ ((char *) (((int *) ( /* dim=0 */ (__pyx_v_Trace.data + __pyx_t_14 * __pyx_v_Trace.strides[0]) )) + __pyx_t_15)) ))); /* "tribeflow/_eval.pyx":40 * o = Trace[i, j] * ll_per_z[z] += \ * log(_learn.dir_posterior(Count_sz[o, z], count_z[z], ns, \ # <<<<<<<<<<<<<< * beta_zs)) * */ __pyx_t_16 = __pyx_v_o; __pyx_t_17 = __pyx_v_z; __pyx_t_18 = __pyx_v_z; /* "tribeflow/_eval.pyx":39 * for j in xrange(1, Trace.shape[1] - 1): * o = Trace[i, j] * ll_per_z[z] += \ # <<<<<<<<<<<<<< * log(_learn.dir_posterior(Count_sz[o, z], count_z[z], ns, \ * beta_zs)) */ __pyx_t_19 = __pyx_v_z; *((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_ll_per_z.data) + __pyx_t_19)) )) += log(__pyx_f_9tribeflow_6_learn_dir_posterior((*((int *) ( /* dim=1 */ ((char *) (((int *) ( /* dim=0 */ (__pyx_v_Count_sz.data + __pyx_t_16 * __pyx_v_Count_sz.strides[0]) )) + __pyx_t_17)) ))), (*((int *) ( /* dim=0 */ ((char *) (((int *) __pyx_v_count_z.data) + __pyx_t_18)) ))), __pyx_v_ns, __pyx_v_beta_zs)); } /* "tribeflow/_eval.pyx":44 * * ll_per_z[z] += \ * log(_learn.dir_posterior(Count_zh[z, h], count_h[h], nz, \ # <<<<<<<<<<<<<< * alpha_zh)) + \ * log(kernel.pdf(dt, z, previous_stamps)) */ __pyx_t_20 = __pyx_v_z; __pyx_t_21 = __pyx_v_h; __pyx_t_22 = __pyx_v_h; /* "tribeflow/_eval.pyx":43 * beta_zs)) * * ll_per_z[z] += \ # <<<<<<<<<<<<<< * log(_learn.dir_posterior(Count_zh[z, h], count_h[h], nz, \ * alpha_zh)) + \ */ __pyx_t_23 = __pyx_v_z; *((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_ll_per_z.data) + __pyx_t_23)) )) += (log(__pyx_f_9tribeflow_6_learn_dir_posterior((*((int *) ( /* dim=1 */ ((char *) (((int *) ( /* dim=0 */ (__pyx_v_Count_zh.data + __pyx_t_20 * __pyx_v_Count_zh.strides[0]) )) + __pyx_t_21)) ))), (*((int *) ( /* dim=0 */ ((char *) (((int *) __pyx_v_count_h.data) + __pyx_t_22)) ))), __pyx_v_nz, __pyx_v_alpha_zh)) + log(((struct __pyx_vtabstruct_9tribeflow_7kernels_4base_Kernel *)__pyx_v_kernel->__pyx_vtab)->pdf(__pyx_v_kernel, __pyx_v_dt, __pyx_v_z, __pyx_v_previous_stamps))); } /* "tribeflow/_eval.pyx":21 * double log(double) nogil * * def quality_estimate(double[:,::1] Dts, int[:,::1] Trace, \ # <<<<<<<<<<<<<< * StampLists previous_stamps, int[:,::1] Count_zh, int[:,::1] Count_sz, \ * int[::1] count_h, int[::1] count_z, double alpha_zh, double beta_zs, \ */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); __PYX_XDEC_MEMVIEW(&__pyx_v_Dts, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_Trace, 1); 
__PYX_XDEC_MEMVIEW(&__pyx_v_Count_zh, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_Count_sz, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_count_h, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_count_z, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_ll_per_z, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_idx, 1); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "tribeflow/_eval.pyx":48 * log(kernel.pdf(dt, z, previous_stamps)) * * def reciprocal_rank(double[:, ::1] Dts, int[:, ::1] HOs, \ # <<<<<<<<<<<<<< * StampLists previous_stamps, double[:, ::1] Theta_zh, \ * double[:, ::1] Psi_sz, int[::1] count_z, Kernel kernel, */ /* Python wrapper */ static PyObject *__pyx_pw_9tribeflow_5_eval_3reciprocal_rank(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_9tribeflow_5_eval_2reciprocal_rank[] = "\n    Computes the reciprocal rank of predictions. Parameter descriptions\n    below consider a burst of size `B`.\n\n    Parameters\n    ----------\n    Dts: matrix with inter event times. Shape is (n_events, B)\n    HOs: hyper node (users) with burst (objects). Shape is (n_events, B+1). The\n    last element in each row is the true object.\n    previous_stamps: time stamps used to compute inter event times\n    Theta_zh, Psi_sz, count_z: outputs of tribeflow\n    kernel: inter event kernel\n    return_probs: boolean indicating if we should return full probabilities\n    (unnormalized for each row).\n    \n    Returns\n    -------\n\n    An array with reciprocal ranks and another with probabilities.\n    "; static PyMethodDef __pyx_mdef_9tribeflow_5_eval_3reciprocal_rank = {"reciprocal_rank", (PyCFunction)__pyx_pw_9tribeflow_5_eval_3reciprocal_rank, METH_VARARGS|METH_KEYWORDS, __pyx_doc_9tribeflow_5_eval_2reciprocal_rank}; static PyObject *__pyx_pw_9tribeflow_5_eval_3reciprocal_rank(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { __Pyx_memviewslice __pyx_v_Dts = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_HOs = { 0, 0, { 0 }, { 0 }, { 0 } }; struct __pyx_obj_9tribeflow_13mycollections_11stamp_lists_StampLists *__pyx_v_previous_stamps = 0; __Pyx_memviewslice __pyx_v_Theta_zh = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_Psi_sz = { 0, 0, { 0 }, { 0 }, { 0 } }; CYTHON_UNUSED __Pyx_memviewslice __pyx_v_count_z = { 0, 0, { 0 }, { 0 }, { 0 } }; struct __pyx_obj_9tribeflow_7kernels_4base_Kernel *__pyx_v_kernel = 0; PyBoolObject *__pyx_v_return_probs = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("reciprocal_rank (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_Dts,&__pyx_n_s_HOs,&__pyx_n_s_previous_stamps,&__pyx_n_s_Theta_zh,&__pyx_n_s_Psi_sz,&__pyx_n_s_count_z,&__pyx_n_s_kernel,&__pyx_n_s_return_probs,0}; PyObject* values[8] = {0,0,0,0,0,0,0,0}; /* "tribeflow/_eval.pyx":51 * StampLists previous_stamps, double[:, ::1] Theta_zh, \ * double[:, ::1] Psi_sz, int[::1] count_z, Kernel kernel, * bool return_probs=False): # <<<<<<<<<<<<<< * ''' * Computes the reciprocal rank of predictions.
Parameter descriptions */ values[7] = (PyObject *)((PyBoolObject *)Py_False); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_Dts)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_HOs)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("reciprocal_rank", 0, 7, 8, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 48; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_previous_stamps)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("reciprocal_rank", 0, 7, 8, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 48; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_Theta_zh)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("reciprocal_rank", 0, 7, 8, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 48; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 4: if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_Psi_sz)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("reciprocal_rank", 0, 7, 8, 4); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 48; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 5: if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_count_z)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("reciprocal_rank", 0, 7, 8, 5); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 48; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 6: if (likely((values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_kernel)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("reciprocal_rank", 0, 7, 8, 6); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 48; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 7: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_return_probs); if (value) { values[7] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "reciprocal_rank") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 48; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); values[5] = PyTuple_GET_ITEM(__pyx_args, 5); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_Dts = __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(values[0]); if (unlikely(!__pyx_v_Dts.memview)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 48; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_HOs = 
__Pyx_PyObject_to_MemoryviewSlice_d_dc_int(values[1]); if (unlikely(!__pyx_v_HOs.memview)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 48; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_previous_stamps = ((struct __pyx_obj_9tribeflow_13mycollections_11stamp_lists_StampLists *)values[2]); __pyx_v_Theta_zh = __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(values[3]); if (unlikely(!__pyx_v_Theta_zh.memview)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 49; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_Psi_sz = __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(values[4]); if (unlikely(!__pyx_v_Psi_sz.memview)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_count_z = __Pyx_PyObject_to_MemoryviewSlice_dc_int(values[5]); if (unlikely(!__pyx_v_count_z.memview)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_kernel = ((struct __pyx_obj_9tribeflow_7kernels_4base_Kernel *)values[6]); __pyx_v_return_probs = ((PyBoolObject *)values[7]); } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("reciprocal_rank", 0, 7, 8, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 48; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("tribeflow._eval.reciprocal_rank", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_previous_stamps), __pyx_ptype_9tribeflow_13mycollections_11stamp_lists_StampLists, 1, "previous_stamps", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 49; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_kernel), __pyx_ptype_9tribeflow_7kernels_4base_Kernel, 1, "kernel", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_return_probs), __pyx_ptype_7cpython_4bool_bool, 1, "return_probs", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 51; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = __pyx_pf_9tribeflow_5_eval_2reciprocal_rank(__pyx_self, __pyx_v_Dts, __pyx_v_HOs, __pyx_v_previous_stamps, __pyx_v_Theta_zh, __pyx_v_Psi_sz, __pyx_v_count_z, __pyx_v_kernel, __pyx_v_return_probs); /* "tribeflow/_eval.pyx":48 * log(kernel.pdf(dt, z, previous_stamps)) * * def reciprocal_rank(double[:, ::1] Dts, int[:, ::1] HOs, \ # <<<<<<<<<<<<<< * StampLists previous_stamps, double[:, ::1] Theta_zh, \ * double[:, ::1] Psi_sz, int[::1] count_z, Kernel kernel, */ /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_9tribeflow_5_eval_2reciprocal_rank(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_Dts, __Pyx_memviewslice __pyx_v_HOs, struct __pyx_obj_9tribeflow_13mycollections_11stamp_lists_StampLists *__pyx_v_previous_stamps, __Pyx_memviewslice __pyx_v_Theta_zh, __Pyx_memviewslice __pyx_v_Psi_sz, CYTHON_UNUSED __Pyx_memviewslice __pyx_v_count_z, struct __pyx_obj_9tribeflow_7kernels_4base_Kernel *__pyx_v_kernel, PyBoolObject *__pyx_v_return_probs) { double __pyx_v_dt; int __pyx_v_h; CYTHON_UNUSED int __pyx_v_s; int __pyx_v_real_o; int __pyx_v_candidate_o; int __pyx_v_last_o; int __pyx_v_z; int __pyx_v_ns; __Pyx_memviewslice __pyx_v_mem = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice 
__pyx_v_mem_factor = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_p = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_rrs = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_predictions = { 0, 0, { 0 }, { 0 }, { 0 } }; int __pyx_v_i; int __pyx_v_j; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; __Pyx_memviewslice __pyx_t_5 = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_t_6 = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_t_7 = { 0, 0, { 0 }, { 0 }, { 0 } }; PyObject *__pyx_t_8 = NULL; __Pyx_memviewslice __pyx_t_9 = { 0, 0, { 0 }, { 0 }, { 0 } }; Py_ssize_t __pyx_t_10; int __pyx_t_11; Py_ssize_t __pyx_t_12; Py_ssize_t __pyx_t_13; Py_ssize_t __pyx_t_14; Py_ssize_t __pyx_t_15; Py_ssize_t __pyx_t_16; int __pyx_t_17; Py_ssize_t __pyx_t_18; Py_ssize_t __pyx_t_19; Py_ssize_t __pyx_t_20; Py_ssize_t __pyx_t_21; Py_ssize_t __pyx_t_22; Py_ssize_t __pyx_t_23; Py_ssize_t __pyx_t_24; int __pyx_t_25; int __pyx_t_26; Py_ssize_t __pyx_t_27; Py_ssize_t __pyx_t_28; Py_ssize_t __pyx_t_29; Py_ssize_t __pyx_t_30; Py_ssize_t __pyx_t_31; Py_ssize_t __pyx_t_32; Py_ssize_t __pyx_t_33; Py_ssize_t __pyx_t_34; Py_ssize_t __pyx_t_35; Py_ssize_t __pyx_t_36; Py_ssize_t __pyx_t_37; int __pyx_t_38; Py_ssize_t __pyx_t_39; Py_ssize_t __pyx_t_40; Py_ssize_t __pyx_t_41; Py_ssize_t __pyx_t_42; Py_ssize_t __pyx_t_43; Py_ssize_t __pyx_t_44; int __pyx_t_45; Py_ssize_t __pyx_t_46; Py_ssize_t __pyx_t_47; int __pyx_t_48; Py_ssize_t __pyx_t_49; Py_ssize_t __pyx_t_50; Py_ssize_t __pyx_t_51; Py_ssize_t __pyx_t_52; PyObject *__pyx_t_53 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("reciprocal_rank", 0); /* "tribeflow/_eval.pyx":73 * ''' * * cdef double dt = 0 # <<<<<<<<<<<<<< * cdef int h = 0 * cdef int s = 0 */ __pyx_v_dt = 0.0; /* "tribeflow/_eval.pyx":74 * * cdef double dt = 0 * cdef int h = 0 # <<<<<<<<<<<<<< * cdef int s = 0 * cdef int real_o = 0 */ __pyx_v_h = 0; /* "tribeflow/_eval.pyx":75 * cdef double dt = 0 * cdef int h = 0 * cdef int s = 0 # <<<<<<<<<<<<<< * cdef int real_o = 0 * cdef int candidate_o = 0 */ __pyx_v_s = 0; /* "tribeflow/_eval.pyx":76 * cdef int h = 0 * cdef int s = 0 * cdef int real_o = 0 # <<<<<<<<<<<<<< * cdef int candidate_o = 0 * cdef int last_o = 0 */ __pyx_v_real_o = 0; /* "tribeflow/_eval.pyx":77 * cdef int s = 0 * cdef int real_o = 0 * cdef int candidate_o = 0 # <<<<<<<<<<<<<< * cdef int last_o = 0 * */ __pyx_v_candidate_o = 0; /* "tribeflow/_eval.pyx":78 * cdef int real_o = 0 * cdef int candidate_o = 0 * cdef int last_o = 0 # <<<<<<<<<<<<<< * * cdef int z = 0 */ __pyx_v_last_o = 0; /* "tribeflow/_eval.pyx":80 * cdef int last_o = 0 * * cdef int z = 0 # <<<<<<<<<<<<<< * cdef int ns = Psi_sz.shape[0] * */ __pyx_v_z = 0; /* "tribeflow/_eval.pyx":81 * * cdef int z = 0 * cdef int ns = Psi_sz.shape[0] # <<<<<<<<<<<<<< * * cdef int[::1] mem = np.zeros(Dts.shape[1], dtype='i4') #mem.shape = (B,). Dts.shape[1] is B (history) */ __pyx_v_ns = (__pyx_v_Psi_sz.shape[0]); /* "tribeflow/_eval.pyx":83 * cdef int ns = Psi_sz.shape[0] * * cdef int[::1] mem = np.zeros(Dts.shape[1], dtype='i4') #mem.shape = (B,). 
Dts.shape[1] is B (history) # <<<<<<<<<<<<<< * cdef double[::1] mem_factor = np.zeros(Psi_sz.shape[1], dtype='d') #mem_factor.shape = (nz,) * cdef double[::1] p = np.zeros(Psi_sz.shape[0], dtype='d')# p.shape = (no,) */ __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 83; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_zeros); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 83; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyInt_FromSsize_t((__pyx_v_Dts.shape[1])); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 83; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 83; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 83; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_t_1, __pyx_n_s_dtype, __pyx_n_s_i4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 83; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_3, __pyx_t_1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 83; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_5 = __Pyx_PyObject_to_MemoryviewSlice_dc_int(__pyx_t_4); if (unlikely(!__pyx_t_5.memview)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 83; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_v_mem = __pyx_t_5; __pyx_t_5.memview = NULL; __pyx_t_5.data = NULL; /* "tribeflow/_eval.pyx":84 * * cdef int[::1] mem = np.zeros(Dts.shape[1], dtype='i4') #mem.shape = (B,). 
Dts.shape[1] is B (history) * cdef double[::1] mem_factor = np.zeros(Psi_sz.shape[1], dtype='d') #mem_factor.shape = (nz,) # <<<<<<<<<<<<<< * cdef double[::1] p = np.zeros(Psi_sz.shape[0], dtype='d')# p.shape = (no,) * */ __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 84; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_zeros); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 84; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyInt_FromSsize_t((__pyx_v_Psi_sz.shape[1])); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 84; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 84; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyDict_New(); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 84; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_dtype, __pyx_n_s_d) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 84; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 84; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_to_MemoryviewSlice_dc_double(__pyx_t_2); if (unlikely(!__pyx_t_6.memview)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 84; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_v_mem_factor = __pyx_t_6; __pyx_t_6.memview = NULL; __pyx_t_6.data = NULL; /* "tribeflow/_eval.pyx":85 * cdef int[::1] mem = np.zeros(Dts.shape[1], dtype='i4') #mem.shape = (B,). 
Dts.shape[1] is B (history) * cdef double[::1] mem_factor = np.zeros(Psi_sz.shape[1], dtype='d') #mem_factor.shape = (nz,) * cdef double[::1] p = np.zeros(Psi_sz.shape[0], dtype='d')# p.shape = (no,) # <<<<<<<<<<<<<< * * cdef double[:] rrs = np.zeros(shape=(HOs.shape[0], ), dtype='d') */ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 85; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_zeros); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 85; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyInt_FromSsize_t((__pyx_v_Psi_sz.shape[0])); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 85; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 85; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyDict_New(); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 85; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_dtype, __pyx_n_s_d) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 85; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_3, __pyx_t_2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 85; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_6 = __Pyx_PyObject_to_MemoryviewSlice_dc_double(__pyx_t_1); if (unlikely(!__pyx_t_6.memview)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 85; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_p = __pyx_t_6; __pyx_t_6.memview = NULL; __pyx_t_6.data = NULL; /* "tribeflow/_eval.pyx":87 * cdef double[::1] p = np.zeros(Psi_sz.shape[0], dtype='d')# p.shape = (no,) * * cdef double[:] rrs = np.zeros(shape=(HOs.shape[0], ), dtype='d') # <<<<<<<<<<<<<< * cdef double[:, ::1] predictions = \ * np.zeros(shape=(HOs.shape[0], ns), dtype='d') */ __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 87; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_zeros); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 87; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 87; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = PyInt_FromSsize_t((__pyx_v_HOs.shape[0])); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 87; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 87; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3); 
__pyx_t_3 = 0; if (PyDict_SetItem(__pyx_t_1, __pyx_n_s_shape, __pyx_t_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 87; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (PyDict_SetItem(__pyx_t_1, __pyx_n_s_dtype, __pyx_n_s_d) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 87; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_empty_tuple, __pyx_t_1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 87; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_7 = __Pyx_PyObject_to_MemoryviewSlice_ds_double(__pyx_t_4); if (unlikely(!__pyx_t_7.memview)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 87; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_v_rrs = __pyx_t_7; __pyx_t_7.memview = NULL; __pyx_t_7.data = NULL; /* "tribeflow/_eval.pyx":89 * cdef double[:] rrs = np.zeros(shape=(HOs.shape[0], ), dtype='d') * cdef double[:, ::1] predictions = \ * np.zeros(shape=(HOs.shape[0], ns), dtype='d') # <<<<<<<<<<<<<< * * cdef int i, j */ __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 89; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_zeros); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 89; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyDict_New(); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 89; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_2 = PyInt_FromSsize_t((__pyx_v_HOs.shape[0])); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 89; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_ns); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 89; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_8 = PyTuple_New(2); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 89; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_8); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_8, 1, __pyx_t_3); __pyx_t_2 = 0; __pyx_t_3 = 0; if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_shape, __pyx_t_8) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 89; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_dtype, __pyx_n_s_d) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 89; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_8 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_empty_tuple, __pyx_t_4); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 89; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_9 = __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(__pyx_t_8); if (unlikely(!__pyx_t_9.memview)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 89; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __pyx_v_predictions = __pyx_t_9; __pyx_t_9.memview = NULL; 
__pyx_t_9.data = NULL; /* "tribeflow/_eval.pyx":92 * * cdef int i, j * for i in xrange(HOs.shape[0]): # for all test instances # <<<<<<<<<<<<<< * dt = Dts[i, Dts.shape[1] - 1] # getting the time of next item (to be predicted) * h = HOs[i, 0] #the userid */ __pyx_t_10 = (__pyx_v_HOs.shape[0]); for (__pyx_t_11 = 0; __pyx_t_11 < __pyx_t_10; __pyx_t_11+=1) { __pyx_v_i = __pyx_t_11; /* "tribeflow/_eval.pyx":93 * cdef int i, j * for i in xrange(HOs.shape[0]): # for all test instances * dt = Dts[i, Dts.shape[1] - 1] # getting the time of next item (to be predicted) # <<<<<<<<<<<<<< * h = HOs[i, 0] #the userid * for j in xrange(mem.shape[0]): # for B */ __pyx_t_12 = __pyx_v_i; __pyx_t_13 = ((__pyx_v_Dts.shape[1]) - 1); __pyx_v_dt = (*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_Dts.data + __pyx_t_12 * __pyx_v_Dts.strides[0]) )) + __pyx_t_13)) ))); /* "tribeflow/_eval.pyx":94 * for i in xrange(HOs.shape[0]): # for all test instances * dt = Dts[i, Dts.shape[1] - 1] # getting the time of next item (to be predicted) * h = HOs[i, 0] #the userid # <<<<<<<<<<<<<< * for j in xrange(mem.shape[0]): # for B * mem[j] = HOs[i, 1 + j] #mem will hold a list of the B history obj ids. i.e. mem[0] = id(ob1). i.e. Excluding the last item(to be predicted one) */ __pyx_t_14 = __pyx_v_i; __pyx_t_15 = 0; __pyx_v_h = (*((int *) ( /* dim=1 */ ((char *) (((int *) ( /* dim=0 */ (__pyx_v_HOs.data + __pyx_t_14 * __pyx_v_HOs.strides[0]) )) + __pyx_t_15)) ))); /* "tribeflow/_eval.pyx":95 * dt = Dts[i, Dts.shape[1] - 1] # getting the time of next item (to be predicted) * h = HOs[i, 0] #the userid * for j in xrange(mem.shape[0]): # for B # <<<<<<<<<<<<<< * mem[j] = HOs[i, 1 + j] #mem will hold a list of the B history obj ids. i.e. mem[0] = id(ob1). i.e. Excluding the last item(to be predicted one) * real_o = HOs[i, HOs.shape[1] - 1] # the gold object id to be predicted */ __pyx_t_16 = (__pyx_v_mem.shape[0]); for (__pyx_t_17 = 0; __pyx_t_17 < __pyx_t_16; __pyx_t_17+=1) { __pyx_v_j = __pyx_t_17; /* "tribeflow/_eval.pyx":96 * h = HOs[i, 0] #the userid * for j in xrange(mem.shape[0]): # for B * mem[j] = HOs[i, 1 + j] #mem will hold a list of the B history obj ids. i.e. mem[0] = id(ob1). i.e. Excluding the last item(to be predicted one) # <<<<<<<<<<<<<< * real_o = HOs[i, HOs.shape[1] - 1] # the gold object id to be predicted * last_o = HOs[i, HOs.shape[1] - 2] # the last object id to be predicted (object #B) */ __pyx_t_18 = __pyx_v_i; __pyx_t_19 = (1 + __pyx_v_j); __pyx_t_20 = __pyx_v_j; *((int *) ( /* dim=0 */ ((char *) (((int *) __pyx_v_mem.data) + __pyx_t_20)) )) = (*((int *) ( /* dim=1 */ ((char *) (((int *) ( /* dim=0 */ (__pyx_v_HOs.data + __pyx_t_18 * __pyx_v_HOs.strides[0]) )) + __pyx_t_19)) ))); } /* "tribeflow/_eval.pyx":97 * for j in xrange(mem.shape[0]): # for B * mem[j] = HOs[i, 1 + j] #mem will hold a list of the B history obj ids. i.e. mem[0] = id(ob1). i.e. Excluding the last item(to be predicted one) * real_o = HOs[i, HOs.shape[1] - 1] # the gold object id to be predicted # <<<<<<<<<<<<<< * last_o = HOs[i, HOs.shape[1] - 2] # the last object id to be predicted (object #B) * */ __pyx_t_21 = __pyx_v_i; __pyx_t_22 = ((__pyx_v_HOs.shape[1]) - 1); __pyx_v_real_o = (*((int *) ( /* dim=1 */ ((char *) (((int *) ( /* dim=0 */ (__pyx_v_HOs.data + __pyx_t_21 * __pyx_v_HOs.strides[0]) )) + __pyx_t_22)) ))); /* "tribeflow/_eval.pyx":98 * mem[j] = HOs[i, 1 + j] #mem will hold a list of the B history obj ids. i.e. mem[0] = id(ob1). i.e. 
Excluding the last item(to be predicted one) * real_o = HOs[i, HOs.shape[1] - 1] # the gold object id to be predicted * last_o = HOs[i, HOs.shape[1] - 2] # the last object id to be predicted (object #B) # <<<<<<<<<<<<<< * * for candidate_o in prange(ns, schedule='static', nogil=True): # for all objects (ns = #objs) */ __pyx_t_23 = __pyx_v_i; __pyx_t_24 = ((__pyx_v_HOs.shape[1]) - 2); __pyx_v_last_o = (*((int *) ( /* dim=1 */ ((char *) (((int *) ( /* dim=0 */ (__pyx_v_HOs.data + __pyx_t_23 * __pyx_v_HOs.strides[0]) )) + __pyx_t_24)) ))); /* "tribeflow/_eval.pyx":100 * last_o = HOs[i, HOs.shape[1] - 2] # the last object id to be predicted (object #B) * * for candidate_o in prange(ns, schedule='static', nogil=True): # for all objects (ns = #objs) # <<<<<<<<<<<<<< * p[candidate_o] = 0.0 * #calculation for loop */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS #endif /*try:*/ { __pyx_t_17 = __pyx_v_ns; if (1 == 0) abort(); { #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_26 = (__pyx_t_17 - 0) / 1; if (__pyx_t_26 > 0) { #ifdef _OPENMP #pragma omp parallel private(__pyx_t_27) #endif /* _OPENMP */ { #ifdef _OPENMP #pragma omp for firstprivate(__pyx_v_candidate_o) lastprivate(__pyx_v_candidate_o) schedule(static) #endif /* _OPENMP */ for (__pyx_t_25 = 0; __pyx_t_25 < __pyx_t_26; __pyx_t_25++){ { __pyx_v_candidate_o = 0 + 1 * __pyx_t_25; /* "tribeflow/_eval.pyx":101 * * for candidate_o in prange(ns, schedule='static', nogil=True): # for all objects (ns = #objs) * p[candidate_o] = 0.0 # <<<<<<<<<<<<<< * #calculation for loop * for z in xrange(Psi_sz.shape[1]): #for envs */ __pyx_t_27 = __pyx_v_candidate_o; *((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_p.data) + __pyx_t_27)) )) = 0.0; } } } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "tribeflow/_eval.pyx":100 * last_o = HOs[i, HOs.shape[1] - 2] # the last object id to be predicted (object #B) * * for candidate_o in prange(ns, schedule='static', nogil=True): # for all objects (ns = #objs) # <<<<<<<<<<<<<< * p[candidate_o] = 0.0 * #calculation for loop */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L11; } __pyx_L11:; } } /* "tribeflow/_eval.pyx":103 * p[candidate_o] = 0.0 * #calculation for loop * for z in xrange(Psi_sz.shape[1]): #for envs # <<<<<<<<<<<<<< * mem_factor[z] = 1.0 * for j in xrange(mem.shape[0]):#for all B (history objs) */ __pyx_t_16 = (__pyx_v_Psi_sz.shape[1]); for (__pyx_t_26 = 0; __pyx_t_26 < __pyx_t_16; __pyx_t_26+=1) { __pyx_v_z = __pyx_t_26; /* "tribeflow/_eval.pyx":104 * #calculation for loop * for z in xrange(Psi_sz.shape[1]): #for envs * mem_factor[z] = 1.0 # <<<<<<<<<<<<<< * for j in xrange(mem.shape[0]):#for all B (history objs) * #i.e. multiply all psi[objid1,z]*psi[objid2,z]*..psi[objidB,z] */ __pyx_t_28 = __pyx_v_z; *((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_mem_factor.data) + __pyx_t_28)) )) = 1.0; /* "tribeflow/_eval.pyx":105 * for z in xrange(Psi_sz.shape[1]): #for envs * mem_factor[z] = 1.0 * for j in xrange(mem.shape[0]):#for all B (history objs) # <<<<<<<<<<<<<< * #i.e. 
multiply all psi[objid1,z]*psi[objid2,z]*..psi[objidB,z] * mem_factor[z] *= Psi_sz[mem[j], z] # Psi[objId, env z] */ __pyx_t_29 = (__pyx_v_mem.shape[0]); for (__pyx_t_25 = 0; __pyx_t_25 < __pyx_t_29; __pyx_t_25+=1) { __pyx_v_j = __pyx_t_25; /* "tribeflow/_eval.pyx":107 * for j in xrange(mem.shape[0]):#for all B (history objs) * #i.e. multiply all psi[objid1,z]*psi[objid2,z]*..psi[objidB,z] * mem_factor[z] *= Psi_sz[mem[j], z] # Psi[objId, env z] # <<<<<<<<<<<<<< * * mem_factor[z] *= 1.0 / (1 - Psi_sz[mem[mem.shape[0] - 1], z])# 1-Psi_sz[mem[B-1],z] == 1-psi_sz[objIdB(the last one in the history),z] */ __pyx_t_30 = __pyx_v_j; __pyx_t_31 = (*((int *) ( /* dim=0 */ ((char *) (((int *) __pyx_v_mem.data) + __pyx_t_30)) ))); __pyx_t_32 = __pyx_v_z; __pyx_t_33 = __pyx_v_z; *((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_mem_factor.data) + __pyx_t_33)) )) *= (*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_Psi_sz.data + __pyx_t_31 * __pyx_v_Psi_sz.strides[0]) )) + __pyx_t_32)) ))); } } /* "tribeflow/_eval.pyx":109 * mem_factor[z] *= Psi_sz[mem[j], z] # Psi[objId, env z] * * mem_factor[z] *= 1.0 / (1 - Psi_sz[mem[mem.shape[0] - 1], z])# 1-Psi_sz[mem[B-1],z] == 1-psi_sz[objIdB(the last one in the history),z] # <<<<<<<<<<<<<< * * for z in xrange(Psi_sz.shape[1]): */ __pyx_t_34 = ((__pyx_v_mem.shape[0]) - 1); __pyx_t_35 = (*((int *) ( /* dim=0 */ ((char *) (((int *) __pyx_v_mem.data) + __pyx_t_34)) ))); __pyx_t_36 = __pyx_v_z; __pyx_t_37 = __pyx_v_z; *((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_mem_factor.data) + __pyx_t_37)) )) *= (1.0 / (1.0 - (*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_Psi_sz.data + __pyx_t_35 * __pyx_v_Psi_sz.strides[0]) )) + __pyx_t_36)) ))))); /* "tribeflow/_eval.pyx":111 * mem_factor[z] *= 1.0 / (1 - Psi_sz[mem[mem.shape[0] - 1], z])# 1-Psi_sz[mem[B-1],z] == 1-psi_sz[objIdB(the last one in the history),z] * * for z in xrange(Psi_sz.shape[1]): # <<<<<<<<<<<<<< * for candidate_o in prange(ns, schedule='static', nogil=True): * p[candidate_o] += mem_factor[z] * \ */ __pyx_t_16 = (__pyx_v_Psi_sz.shape[1]); for (__pyx_t_26 = 0; __pyx_t_26 < __pyx_t_16; __pyx_t_26+=1) { __pyx_v_z = __pyx_t_26; /* "tribeflow/_eval.pyx":112 * * for z in xrange(Psi_sz.shape[1]): * for candidate_o in prange(ns, schedule='static', nogil=True): # <<<<<<<<<<<<<< * p[candidate_o] += mem_factor[z] * \ * Psi_sz[candidate_o, z] * Theta_zh[z, h] * \ */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS #endif /*try:*/ { __pyx_t_25 = __pyx_v_ns; if (1 == 0) abort(); { #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_38 = (__pyx_t_25 - 0) / 1; if (__pyx_t_38 > 0) { #ifdef _OPENMP #pragma omp parallel private(__pyx_t_40, __pyx_t_41, __pyx_t_44, __pyx_t_43, __pyx_t_39, __pyx_t_42) #endif /* _OPENMP */ { #ifdef _OPENMP #pragma omp for firstprivate(__pyx_v_candidate_o) lastprivate(__pyx_v_candidate_o) schedule(static) #endif /* _OPENMP */ for (__pyx_t_17 = 0; __pyx_t_17 < __pyx_t_38; __pyx_t_17++){ { __pyx_v_candidate_o = 0 + 1 * __pyx_t_17; /* "tribeflow/_eval.pyx":113 * for z in xrange(Psi_sz.shape[1]): * for candidate_o in prange(ns, schedule='static', nogil=True): * p[candidate_o] += mem_factor[z] * \ # <<<<<<<<<<<<<< * Psi_sz[candidate_o, z] * Theta_zh[z, h] * \ * kernel.pdf(dt, z, previous_stamps) */ __pyx_t_39 = __pyx_v_z; /* "tribeflow/_eval.pyx":114 * 
for candidate_o in prange(ns, schedule='static', nogil=True): * p[candidate_o] += mem_factor[z] * \ * Psi_sz[candidate_o, z] * Theta_zh[z, h] * \ # <<<<<<<<<<<<<< * kernel.pdf(dt, z, previous_stamps) * */ __pyx_t_40 = __pyx_v_candidate_o; __pyx_t_41 = __pyx_v_z; /* "tribeflow/_eval.pyx":113 * for z in xrange(Psi_sz.shape[1]): * for candidate_o in prange(ns, schedule='static', nogil=True): * p[candidate_o] += mem_factor[z] * \ # <<<<<<<<<<<<<< * Psi_sz[candidate_o, z] * Theta_zh[z, h] * \ * kernel.pdf(dt, z, previous_stamps) */ __pyx_t_42 = __pyx_v_z; __pyx_t_43 = __pyx_v_h; /* "tribeflow/_eval.pyx":114 * for candidate_o in prange(ns, schedule='static', nogil=True): * p[candidate_o] += mem_factor[z] * \ * Psi_sz[candidate_o, z] * Theta_zh[z, h] * \ # <<<<<<<<<<<<<< * kernel.pdf(dt, z, previous_stamps) * */ __pyx_t_44 = __pyx_v_candidate_o; *((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_p.data) + __pyx_t_44)) )) += ((((*((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_mem_factor.data) + __pyx_t_39)) ))) * (*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_Psi_sz.data + __pyx_t_40 * __pyx_v_Psi_sz.strides[0]) )) + __pyx_t_41)) )))) * (*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_Theta_zh.data + __pyx_t_42 * __pyx_v_Theta_zh.strides[0]) )) + __pyx_t_43)) )))) * ((struct __pyx_vtabstruct_9tribeflow_7kernels_4base_Kernel *)__pyx_v_kernel->__pyx_vtab)->pdf(__pyx_v_kernel, __pyx_v_dt, __pyx_v_z, __pyx_v_previous_stamps)); } } } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "tribeflow/_eval.pyx":112 * * for z in xrange(Psi_sz.shape[1]): * for candidate_o in prange(ns, schedule='static', nogil=True): # <<<<<<<<<<<<<< * p[candidate_o] += mem_factor[z] * \ * Psi_sz[candidate_o, z] * Theta_zh[z, h] * \ */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L28; } __pyx_L28:; } } } /* "tribeflow/_eval.pyx":117 * kernel.pdf(dt, z, previous_stamps) * * for candidate_o in prange(ns, schedule='static', nogil=True): # <<<<<<<<<<<<<< * if p[candidate_o] >= p[real_o] and candidate_o != last_o: * rrs[i] += 1 */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS #endif /*try:*/ { __pyx_t_26 = __pyx_v_ns; if (1 == 0) abort(); { #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_17 = (__pyx_t_26 - 0) / 1; if (__pyx_t_17 > 0) { #ifdef _OPENMP #pragma omp parallel private(__pyx_t_46, __pyx_t_52, __pyx_t_48, __pyx_t_45, __pyx_t_50, __pyx_t_49, __pyx_t_47, __pyx_t_51) #endif /* _OPENMP */ { #ifdef _OPENMP #pragma omp for firstprivate(__pyx_v_candidate_o) lastprivate(__pyx_v_candidate_o) schedule(static) #endif /* _OPENMP */ for (__pyx_t_38 = 0; __pyx_t_38 < __pyx_t_17; __pyx_t_38++){ { __pyx_v_candidate_o = 0 + 1 * __pyx_t_38; /* "tribeflow/_eval.pyx":118 * * for candidate_o in prange(ns, schedule='static', nogil=True): * if p[candidate_o] >= p[real_o] and candidate_o != last_o: # <<<<<<<<<<<<<< * rrs[i] += 1 * predictions[i, candidate_o] = p[candidate_o] */ __pyx_t_46 = __pyx_v_candidate_o; __pyx_t_47 = __pyx_v_real_o; __pyx_t_48 = (((*((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_p.data) + __pyx_t_46)) 
))) >= (*((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_p.data) + __pyx_t_47)) )))) != 0); if (__pyx_t_48) { } else { __pyx_t_45 = __pyx_t_48; goto __pyx_L45_bool_binop_done; } __pyx_t_48 = ((__pyx_v_candidate_o != __pyx_v_last_o) != 0); __pyx_t_45 = __pyx_t_48; __pyx_L45_bool_binop_done:; if (__pyx_t_45) { /* "tribeflow/_eval.pyx":119 * for candidate_o in prange(ns, schedule='static', nogil=True): * if p[candidate_o] >= p[real_o] and candidate_o != last_o: * rrs[i] += 1 # <<<<<<<<<<<<<< * predictions[i, candidate_o] = p[candidate_o] * */ __pyx_t_49 = __pyx_v_i; *((double *) ( /* dim=0 */ (__pyx_v_rrs.data + __pyx_t_49 * __pyx_v_rrs.strides[0]) )) += 1.0; /* "tribeflow/_eval.pyx":118 * * for candidate_o in prange(ns, schedule='static', nogil=True): * if p[candidate_o] >= p[real_o] and candidate_o != last_o: # <<<<<<<<<<<<<< * rrs[i] += 1 * predictions[i, candidate_o] = p[candidate_o] */ } /* "tribeflow/_eval.pyx":120 * if p[candidate_o] >= p[real_o] and candidate_o != last_o: * rrs[i] += 1 * predictions[i, candidate_o] = p[candidate_o] # <<<<<<<<<<<<<< * * if return_probs: */ __pyx_t_50 = __pyx_v_candidate_o; __pyx_t_51 = __pyx_v_i; __pyx_t_52 = __pyx_v_candidate_o; *((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_predictions.data + __pyx_t_51 * __pyx_v_predictions.strides[0]) )) + __pyx_t_52)) )) = (*((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_p.data) + __pyx_t_50)) ))); } } } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "tribeflow/_eval.pyx":117 * kernel.pdf(dt, z, previous_stamps) * * for candidate_o in prange(ns, schedule='static', nogil=True): # <<<<<<<<<<<<<< * if p[candidate_o] >= p[real_o] and candidate_o != last_o: * rrs[i] += 1 */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L39; } __pyx_L39:; } } } /* "tribeflow/_eval.pyx":122 * predictions[i, candidate_o] = p[candidate_o] * * if return_probs: # <<<<<<<<<<<<<< * return np.array(rrs), np.array(predictions) * else: */ __pyx_t_45 = __Pyx_PyObject_IsTrue(((PyObject *)__pyx_v_return_probs)); if (unlikely(__pyx_t_45 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 122; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (__pyx_t_45) { /* "tribeflow/_eval.pyx":123 * * if return_probs: * return np.array(rrs), np.array(predictions) # <<<<<<<<<<<<<< * else: * return np.array(rrs) */ __Pyx_XDECREF(__pyx_r); __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 123; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_array); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 123; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __pyx_memoryview_fromslice(__pyx_v_rrs, 1, (PyObject *(*)(char *)) __pyx_memview_get_double, (int (*)(char *, PyObject *)) __pyx_memview_set_double, 0);; if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 123; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = NULL; if (CYTHON_COMPILING_IN_CPYTHON && unlikely(PyMethod_Check(__pyx_t_1))) { __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_1); if (likely(__pyx_t_3)) { PyObject* 
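/*
 * The two prange loops above are the heart of reciprocal_rank in
 * "tribeflow/_eval.pyx" (lines 112-120): each candidate object o is scored
 * as
 *
 *     p[o] = sum over z of  mem_factor[z] * Psi_sz[o, z] * Theta_zh[z, h]
 *                           * kernel.pdf(dt, z, previous_stamps)
 *
 * and rrs[i] then counts the candidates (excluding last_o) whose score is
 * at least p[real_o] -- effectively the 1-based rank of the true next
 * object, whose reciprocal yields the mean reciprocal rank. A minimal
 * serial sketch of the same computation, with a hypothetical kernel_pdf()
 * standing in for the Kernel.pdf virtual call (note the generated code
 * increments rrs[i] from several threads without an atomic, so a serial
 * count as below, or an omp atomic update, is the safer form):
 *
 *     #include <stdlib.h>
 *
 *     double rank_of_real(int ns, int nz, int nh, int h, int real_o,
 *                         int last_o, double dt,
 *                         const double *mem_factor,  // [nz]
 *                         const double *Psi_sz,      // [ns][nz], row-major
 *                         const double *Theta_zh,    // [nz][nh], row-major
 *                         double (*kernel_pdf)(double dt, int z))
 *     {
 *         double rank = 0.0;
 *         double *p = calloc(ns, sizeof *p);
 *         if (!p) return -1.0;
 *         for (int z = 0; z < nz; z++) {
 *             // factor independent of the candidate, hoisted out of o-loop
 *             double w = mem_factor[z] * Theta_zh[z * nh + h]
 *                        * kernel_pdf(dt, z);
 *             for (int o = 0; o < ns; o++)
 *                 p[o] += w * Psi_sz[o * nz + z];
 *         }
 *         for (int o = 0; o < ns; o++)
 *             if (p[o] >= p[real_o] && o != last_o)
 *                 rank += 1.0;
 *         free(p);
 *         return rank;
 *     }
 */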
function = PyMethod_GET_FUNCTION(__pyx_t_1); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_1, function); } } if (!__pyx_t_3) { __pyx_t_8 = __Pyx_PyObject_CallOneArg(__pyx_t_1, __pyx_t_4); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 123; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_GOTREF(__pyx_t_8); } else { __pyx_t_2 = PyTuple_New(1+1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 123; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_3); __pyx_t_3 = NULL; __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_2, 0+1, __pyx_t_4); __pyx_t_4 = 0; __pyx_t_8 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_2, NULL); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 123; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 123; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_array); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 123; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __pyx_memoryview_fromslice(__pyx_v_predictions, 2, (PyObject *(*)(char *)) __pyx_memview_get_double, (int (*)(char *, PyObject *)) __pyx_memview_set_double, 0);; if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 123; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = NULL; if (CYTHON_COMPILING_IN_CPYTHON && unlikely(PyMethod_Check(__pyx_t_4))) { __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4); if (likely(__pyx_t_3)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_4, function); } } if (!__pyx_t_3) { __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 123; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_1); } else { __pyx_t_53 = PyTuple_New(1+1); if (unlikely(!__pyx_t_53)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 123; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_53); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_53, 0, __pyx_t_3); __pyx_t_3 = NULL; __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_53, 0+1, __pyx_t_2); __pyx_t_2 = 0; __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_53, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 123; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_53); __pyx_t_53 = 0; } __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 123; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_8); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_8); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_1); __pyx_t_8 = 0; __pyx_t_1 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; /* "tribeflow/_eval.pyx":122 * predictions[i, candidate_o] = p[candidate_o] * * if 
return_probs: # <<<<<<<<<<<<<< * return np.array(rrs), np.array(predictions) * else: */ } /* "tribeflow/_eval.pyx":125 * return np.array(rrs), np.array(predictions) * else: * return np.array(rrs) # <<<<<<<<<<<<<< */ /*else*/ { __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 125; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_array); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 125; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __pyx_memoryview_fromslice(__pyx_v_rrs, 1, (PyObject *(*)(char *)) __pyx_memview_get_double, (int (*)(char *, PyObject *)) __pyx_memview_set_double, 0);; if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 125; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_53 = NULL; if (CYTHON_COMPILING_IN_CPYTHON && unlikely(PyMethod_Check(__pyx_t_8))) { __pyx_t_53 = PyMethod_GET_SELF(__pyx_t_8); if (likely(__pyx_t_53)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_8); __Pyx_INCREF(__pyx_t_53); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_8, function); } } if (!__pyx_t_53) { __pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_t_8, __pyx_t_1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 125; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_GOTREF(__pyx_t_4); } else { __pyx_t_2 = PyTuple_New(1+1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 125; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_53); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_53); __pyx_t_53 = NULL; __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_2, 0+1, __pyx_t_1); __pyx_t_1 = 0; __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_8, __pyx_t_2, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 125; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; } /* "tribeflow/_eval.pyx":48 * log(kernel.pdf(dt, z, previous_stamps)) * * def reciprocal_rank(double[:, ::1] Dts, int[:, ::1] HOs, \ # <<<<<<<<<<<<<< * StampLists previous_stamps, double[:, ::1] Theta_zh, \ * double[:, ::1] Psi_sz, int[::1] count_z, Kernel kernel, */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __PYX_XDEC_MEMVIEW(&__pyx_t_5, 1); __PYX_XDEC_MEMVIEW(&__pyx_t_6, 1); __PYX_XDEC_MEMVIEW(&__pyx_t_7, 1); __Pyx_XDECREF(__pyx_t_8); __PYX_XDEC_MEMVIEW(&__pyx_t_9, 1); __Pyx_XDECREF(__pyx_t_53); __Pyx_AddTraceback("tribeflow._eval.reciprocal_rank", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __PYX_XDEC_MEMVIEW(&__pyx_v_mem, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_mem_factor, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_p, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_rrs, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_predictions, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_Dts, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_HOs, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_Theta_zh, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_Psi_sz, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_count_z, 1); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":118 * cdef bint 
dtype_is_object * * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< * mode="c", bint allocate_buffer=True): * */ /* Python wrapper */ static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_shape = 0; Py_ssize_t __pyx_v_itemsize; PyObject *__pyx_v_format = 0; PyObject *__pyx_v_mode = 0; int __pyx_v_allocate_buffer; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_shape,&__pyx_n_s_itemsize,&__pyx_n_s_format,&__pyx_n_s_mode,&__pyx_n_s_allocate_buffer,0}; PyObject* values[5] = {0,0,0,0,0}; values[3] = ((PyObject *)__pyx_n_s_c); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_shape)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_itemsize)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 1); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 118; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_format)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 2); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 118; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 3: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_mode); if (value) { values[3] = value; kw_args--; } } case 4: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_allocate_buffer); if (value) { values[4] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 118; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_shape = ((PyObject*)values[0]); __pyx_v_itemsize = __Pyx_PyIndex_AsSsize_t(values[1]); if (unlikely((__pyx_v_itemsize == (Py_ssize_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 118; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_format = values[2]; __pyx_v_mode = values[3]; if (values[4]) { __pyx_v_allocate_buffer = __Pyx_PyObject_IsTrue(values[4]); if (unlikely((__pyx_v_allocate_buffer == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 119; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } else { /* "View.MemoryView":119 * * 
def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, * mode="c", bint allocate_buffer=True): # <<<<<<<<<<<<<< * * cdef int idx */ __pyx_v_allocate_buffer = ((int)1); } } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 118; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_shape), (&PyTuple_Type), 1, "shape", 1))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 118; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(((PyObject *)__pyx_v_format) == Py_None)) { PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "format"); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 118; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(((struct __pyx_array_obj *)__pyx_v_self), __pyx_v_shape, __pyx_v_itemsize, __pyx_v_format, __pyx_v_mode, __pyx_v_allocate_buffer); /* "View.MemoryView":118 * cdef bint dtype_is_object * * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< * mode="c", bint allocate_buffer=True): * */ /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer) { int __pyx_v_idx; Py_ssize_t __pyx_v_i; Py_ssize_t __pyx_v_dim; PyObject **__pyx_v_p; char __pyx_v_order; int __pyx_r; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; char *__pyx_t_6; int __pyx_t_7; Py_ssize_t __pyx_t_8; PyObject *__pyx_t_9 = NULL; PyObject *__pyx_t_10 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__cinit__", 0); __Pyx_INCREF(__pyx_v_format); /* "View.MemoryView":125 * cdef PyObject **p * * self.ndim = <int> len(shape) # <<<<<<<<<<<<<< * self.itemsize = itemsize * */ if (unlikely(__pyx_v_shape == Py_None)) { PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()"); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 125; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_1 = PyTuple_GET_SIZE(__pyx_v_shape); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 125; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_self->ndim = ((int)__pyx_t_1); /* "View.MemoryView":126 * * self.ndim = <int> len(shape) * self.itemsize = itemsize # <<<<<<<<<<<<<< * * if not self.ndim: */ __pyx_v_self->itemsize = __pyx_v_itemsize; /* "View.MemoryView":128 * self.itemsize = itemsize * * if not self.ndim: # <<<<<<<<<<<<<< * raise ValueError("Empty shape tuple for cython.array") * */ __pyx_t_2 = ((!(__pyx_v_self->ndim != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":129 * * if not self.ndim: * raise ValueError("Empty shape tuple for cython.array") # <<<<<<<<<<<<<< * * if itemsize <= 0: */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple_, NULL); if (unlikely(!__pyx_t_3)) 
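/*
 * array.__cinit__ ("View.MemoryView":118-150, continued below) performs the
 * standard sanity checks for a freshly allocated n-d buffer before any
 * memory is touched: a non-empty shape, a positive itemsize, and -- in the
 * enumerate(shape) loop further down -- a strictly positive extent on every
 * axis. A plain-C condensation of those checks (names are illustrative):
 *
 *     #include <stddef.h>
 *
 *     int validate_array_args(const ptrdiff_t *shape, int ndim,
 *                             ptrdiff_t itemsize)
 *     {
 *         if (ndim <= 0)     return -1;  // "Empty shape tuple for cython.array"
 *         if (itemsize <= 0) return -1;  // "itemsize <= 0 for cython.array"
 *         for (int i = 0; i < ndim; i++)
 *             if (shape[i] <= 0)
 *                 return -1;             // "Invalid shape in axis %d: %d."
 *         return 0;
 *     }
 */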
{__pyx_filename = __pyx_f[1]; __pyx_lineno = 129; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 129; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "View.MemoryView":128 * self.itemsize = itemsize * * if not self.ndim: # <<<<<<<<<<<<<< * raise ValueError("Empty shape tuple for cython.array") * */ } /* "View.MemoryView":131 * raise ValueError("Empty shape tuple for cython.array") * * if itemsize <= 0: # <<<<<<<<<<<<<< * raise ValueError("itemsize <= 0 for cython.array") * */ __pyx_t_2 = ((__pyx_v_itemsize <= 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":132 * * if itemsize <= 0: * raise ValueError("itemsize <= 0 for cython.array") # <<<<<<<<<<<<<< * * if not isinstance(format, bytes): */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__2, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 132; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 132; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "View.MemoryView":131 * raise ValueError("Empty shape tuple for cython.array") * * if itemsize <= 0: # <<<<<<<<<<<<<< * raise ValueError("itemsize <= 0 for cython.array") * */ } /* "View.MemoryView":134 * raise ValueError("itemsize <= 0 for cython.array") * * if not isinstance(format, bytes): # <<<<<<<<<<<<<< * format = format.encode('ASCII') * self._format = format # keep a reference to the byte string */ __pyx_t_2 = PyBytes_Check(__pyx_v_format); __pyx_t_4 = ((!(__pyx_t_2 != 0)) != 0); if (__pyx_t_4) { /* "View.MemoryView":135 * * if not isinstance(format, bytes): * format = format.encode('ASCII') # <<<<<<<<<<<<<< * self._format = format # keep a reference to the byte string * self.format = self._format */ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_format, __pyx_n_s_encode); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 135; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_tuple__3, NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 135; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF_SET(__pyx_v_format, __pyx_t_5); __pyx_t_5 = 0; /* "View.MemoryView":134 * raise ValueError("itemsize <= 0 for cython.array") * * if not isinstance(format, bytes): # <<<<<<<<<<<<<< * format = format.encode('ASCII') * self._format = format # keep a reference to the byte string */ } /* "View.MemoryView":136 * if not isinstance(format, bytes): * format = format.encode('ASCII') * self._format = format # keep a reference to the byte string # <<<<<<<<<<<<<< * self.format = self._format * */ if (!(likely(PyBytes_CheckExact(__pyx_v_format))||((__pyx_v_format) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_v_format)->tp_name), 0))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 136; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_5 = __pyx_v_format; __Pyx_INCREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_5); __Pyx_GOTREF(__pyx_v_self->_format); __Pyx_DECREF(__pyx_v_self->_format); __pyx_v_self->_format = ((PyObject*)__pyx_t_5); __pyx_t_5 = 0; /* "View.MemoryView":137 * format = format.encode('ASCII') * self._format = format # keep a reference 
to the byte string * self.format = self._format # <<<<<<<<<<<<<< * * */ __pyx_t_6 = __Pyx_PyObject_AsString(__pyx_v_self->_format); if (unlikely((!__pyx_t_6) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 137; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_self->format = __pyx_t_6; /* "View.MemoryView":140 * * * self._shape = <Py_ssize_t *> PyMem_Malloc(sizeof(Py_ssize_t)*self.ndim*2) # <<<<<<<<<<<<<< * self._strides = self._shape + self.ndim * */ __pyx_v_self->_shape = ((Py_ssize_t *)PyMem_Malloc((((sizeof(Py_ssize_t)) * __pyx_v_self->ndim) * 2))); /* "View.MemoryView":141 * * self._shape = <Py_ssize_t *> PyMem_Malloc(sizeof(Py_ssize_t)*self.ndim*2) * self._strides = self._shape + self.ndim # <<<<<<<<<<<<<< * * if not self._shape: */ __pyx_v_self->_strides = (__pyx_v_self->_shape + __pyx_v_self->ndim); /* "View.MemoryView":143 * self._strides = self._shape + self.ndim * * if not self._shape: # <<<<<<<<<<<<<< * raise MemoryError("unable to allocate shape and strides.") * */ __pyx_t_4 = ((!(__pyx_v_self->_shape != 0)) != 0); if (__pyx_t_4) { /* "View.MemoryView":144 * * if not self._shape: * raise MemoryError("unable to allocate shape and strides.") # <<<<<<<<<<<<<< * * */ __pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__4, NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 144; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 144; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "View.MemoryView":143 * self._strides = self._shape + self.ndim * * if not self._shape: # <<<<<<<<<<<<<< * raise MemoryError("unable to allocate shape and strides.") * */ } /* "View.MemoryView":147 * * * for idx, dim in enumerate(shape): # <<<<<<<<<<<<<< * if dim <= 0: * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) */ __pyx_t_7 = 0; __pyx_t_5 = __pyx_v_shape; __Pyx_INCREF(__pyx_t_5); __pyx_t_1 = 0; for (;;) { if (__pyx_t_1 >= PyTuple_GET_SIZE(__pyx_t_5)) break; #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_5, __pyx_t_1); __Pyx_INCREF(__pyx_t_3); __pyx_t_1++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 147; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #else __pyx_t_3 = PySequence_ITEM(__pyx_t_5, __pyx_t_1); __pyx_t_1++; if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 147; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); #endif __pyx_t_8 = __Pyx_PyIndex_AsSsize_t(__pyx_t_3); if (unlikely((__pyx_t_8 == (Py_ssize_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 147; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_dim = __pyx_t_8; __pyx_v_idx = __pyx_t_7; __pyx_t_7 = (__pyx_t_7 + 1); /* "View.MemoryView":148 * * for idx, dim in enumerate(shape): * if dim <= 0: # <<<<<<<<<<<<<< * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) * self._shape[idx] = dim */ __pyx_t_4 = ((__pyx_v_dim <= 0) != 0); if (__pyx_t_4) { /* "View.MemoryView":149 * for idx, dim in enumerate(shape): * if dim <= 0: * raise ValueError("Invalid shape in axis %d: %d." 
% (idx, dim)) # <<<<<<<<<<<<<< * self._shape[idx] = dim * */ __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_idx); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 149; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_9 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 149; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_9); __pyx_t_10 = PyTuple_New(2); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 149; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_10); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_9); PyTuple_SET_ITEM(__pyx_t_10, 1, __pyx_t_9); __pyx_t_3 = 0; __pyx_t_9 = 0; __pyx_t_9 = __Pyx_PyString_Format(__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_t_10); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 149; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_t_10 = PyTuple_New(1); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 149; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_10); __Pyx_GIVEREF(__pyx_t_9); PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_9); __pyx_t_9 = 0; __pyx_t_9 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_10, NULL); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 149; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_Raise(__pyx_t_9, 0, 0, 0); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 149; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "View.MemoryView":148 * * for idx, dim in enumerate(shape): * if dim <= 0: # <<<<<<<<<<<<<< * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) * self._shape[idx] = dim */ } /* "View.MemoryView":150 * if dim <= 0: * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) * self._shape[idx] = dim # <<<<<<<<<<<<<< * * cdef char order */ (__pyx_v_self->_shape[__pyx_v_idx]) = __pyx_v_dim; /* "View.MemoryView":147 * * * for idx, dim in enumerate(shape): # <<<<<<<<<<<<<< * if dim <= 0: * raise ValueError("Invalid shape in axis %d: %d." 
% (idx, dim)) */ } __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; /* "View.MemoryView":153 * * cdef char order * if mode == 'fortran': # <<<<<<<<<<<<<< * order = b'F' * self.mode = u'fortran' */ __pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_fortran, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 153; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (__pyx_t_4) { /* "View.MemoryView":154 * cdef char order * if mode == 'fortran': * order = b'F' # <<<<<<<<<<<<<< * self.mode = u'fortran' * elif mode == 'c': */ __pyx_v_order = 'F'; /* "View.MemoryView":155 * if mode == 'fortran': * order = b'F' * self.mode = u'fortran' # <<<<<<<<<<<<<< * elif mode == 'c': * order = b'C' */ __Pyx_INCREF(__pyx_n_u_fortran); __Pyx_GIVEREF(__pyx_n_u_fortran); __Pyx_GOTREF(__pyx_v_self->mode); __Pyx_DECREF(__pyx_v_self->mode); __pyx_v_self->mode = __pyx_n_u_fortran; /* "View.MemoryView":153 * * cdef char order * if mode == 'fortran': # <<<<<<<<<<<<<< * order = b'F' * self.mode = u'fortran' */ goto __pyx_L10; } /* "View.MemoryView":156 * order = b'F' * self.mode = u'fortran' * elif mode == 'c': # <<<<<<<<<<<<<< * order = b'C' * self.mode = u'c' */ __pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_c, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 156; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (__pyx_t_4) { /* "View.MemoryView":157 * self.mode = u'fortran' * elif mode == 'c': * order = b'C' # <<<<<<<<<<<<<< * self.mode = u'c' * else: */ __pyx_v_order = 'C'; /* "View.MemoryView":158 * elif mode == 'c': * order = b'C' * self.mode = u'c' # <<<<<<<<<<<<<< * else: * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) */ __Pyx_INCREF(__pyx_n_u_c); __Pyx_GIVEREF(__pyx_n_u_c); __Pyx_GOTREF(__pyx_v_self->mode); __Pyx_DECREF(__pyx_v_self->mode); __pyx_v_self->mode = __pyx_n_u_c; /* "View.MemoryView":156 * order = b'F' * self.mode = u'fortran' * elif mode == 'c': # <<<<<<<<<<<<<< * order = b'C' * self.mode = u'c' */ goto __pyx_L10; } /* "View.MemoryView":160 * self.mode = u'c' * else: * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) # <<<<<<<<<<<<<< * * self.len = fill_contig_strides_array(self._shape, self._strides, */ /*else*/ { __pyx_t_5 = __Pyx_PyString_Format(__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_v_mode); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 160; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_9 = PyTuple_New(1); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 160; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_9); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_9, NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 160; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 160; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_L10:; /* "View.MemoryView":162 * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) * * self.len = fill_contig_strides_array(self._shape, self._strides, # <<<<<<<<<<<<<< * itemsize, self.ndim, order) * */ __pyx_v_self->len = __pyx_fill_contig_strides_array(__pyx_v_self->_shape, __pyx_v_self->_strides, 
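/*
 * fill_contig_strides_array (the call split across this line and the next)
 * computes contiguous strides for the chosen order and returns the total
 * byte length stored into self.len: 'C' order walks the axes last-to-first
 * so the final axis is the fastest, 'F' order walks first-to-last. A
 * standalone sketch of the same computation (function name and use of
 * ptrdiff_t are illustrative, intended to mirror Cython's helper):
 *
 *     #include <stddef.h>
 *
 *     ptrdiff_t fill_contig_strides(const ptrdiff_t *shape, ptrdiff_t *strides,
 *                                   ptrdiff_t itemsize, int ndim, char order)
 *     {
 *         ptrdiff_t stride = itemsize;
 *         if (order == 'F') {                  // first axis varies fastest
 *             for (int i = 0; i < ndim; i++) {
 *                 strides[i] = stride;
 *                 stride *= shape[i];
 *             }
 *         } else {                             // 'C': last axis varies fastest
 *             for (int i = ndim - 1; i >= 0; i--) {
 *                 strides[i] = stride;
 *                 stride *= shape[i];
 *             }
 *         }
 *         return stride;                       // total size in bytes
 *     }
 */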
__pyx_v_itemsize, __pyx_v_self->ndim, __pyx_v_order); /* "View.MemoryView":165 * itemsize, self.ndim, order) * * self.free_data = allocate_buffer # <<<<<<<<<<<<<< * self.dtype_is_object = format == b'O' * if allocate_buffer: */ __pyx_v_self->free_data = __pyx_v_allocate_buffer; /* "View.MemoryView":166 * * self.free_data = allocate_buffer * self.dtype_is_object = format == b'O' # <<<<<<<<<<<<<< * if allocate_buffer: * */ __pyx_t_5 = PyObject_RichCompare(__pyx_v_format, __pyx_n_b_O, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 166; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 166; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_self->dtype_is_object = __pyx_t_4; /* "View.MemoryView":167 * self.free_data = allocate_buffer * self.dtype_is_object = format == b'O' * if allocate_buffer: # <<<<<<<<<<<<<< * * */ __pyx_t_4 = (__pyx_v_allocate_buffer != 0); if (__pyx_t_4) { /* "View.MemoryView":170 * * * self.data = <char *>malloc(self.len) # <<<<<<<<<<<<<< * if not self.data: * raise MemoryError("unable to allocate array data.") */ __pyx_v_self->data = ((char *)malloc(__pyx_v_self->len)); /* "View.MemoryView":171 * * self.data = <char *>malloc(self.len) * if not self.data: # <<<<<<<<<<<<<< * raise MemoryError("unable to allocate array data.") * */ __pyx_t_4 = ((!(__pyx_v_self->data != 0)) != 0); if (__pyx_t_4) { /* "View.MemoryView":172 * self.data = <char *>malloc(self.len) * if not self.data: * raise MemoryError("unable to allocate array data.") # <<<<<<<<<<<<<< * * if self.dtype_is_object: */ __pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 172; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 172; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "View.MemoryView":171 * * self.data = <char *>malloc(self.len) * if not self.data: # <<<<<<<<<<<<<< * raise MemoryError("unable to allocate array data.") * */ } /* "View.MemoryView":174 * raise MemoryError("unable to allocate array data.") * * if self.dtype_is_object: # <<<<<<<<<<<<<< * p = <PyObject **> self.data * for i in range(self.len / itemsize): */ __pyx_t_4 = (__pyx_v_self->dtype_is_object != 0); if (__pyx_t_4) { /* "View.MemoryView":175 * * if self.dtype_is_object: * p = <PyObject **> self.data # <<<<<<<<<<<<<< * for i in range(self.len / itemsize): * p[i] = Py_None */ __pyx_v_p = ((PyObject **)__pyx_v_self->data); /* "View.MemoryView":176 * if self.dtype_is_object: * p = <PyObject **> self.data * for i in range(self.len / itemsize): # <<<<<<<<<<<<<< * p[i] = Py_None * Py_INCREF(Py_None) */ if (unlikely(__pyx_v_itemsize == 0)) { PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 176; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && unlikely(__pyx_v_itemsize == (Py_ssize_t)-1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_self->len))) { PyErr_SetString(PyExc_OverflowError, "value too large to perform division"); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 176; __pyx_clineno = __LINE__; goto 
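/*
 * For object-typed buffers (format b'O') every slot must hold a valid
 * PyObject pointer before the buffer is ever read, so the loop below writes
 * Py_None into each of the len / itemsize slots and takes one reference per
 * slot; the ZeroDivisionError and OverflowError guards just above protect
 * that division. Condensed sketch (fill_object_buffer is a hypothetical
 * condensation of the generated loop; __dealloc__ later drops exactly
 * these references):
 *
 *     static void fill_object_buffer(char *data, Py_ssize_t len,
 *                                    Py_ssize_t itemsize)
 *     {
 *         PyObject **p = (PyObject **)data;
 *         for (Py_ssize_t i = 0; i < len / itemsize; i++) {
 *             p[i] = Py_None;
 *             Py_INCREF(Py_None);   // one reference per stored slot
 *         }
 *     }
 */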
__pyx_L1_error;} } __pyx_t_1 = (__pyx_v_self->len / __pyx_v_itemsize); for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_1; __pyx_t_8+=1) { __pyx_v_i = __pyx_t_8; /* "View.MemoryView":177 * p = <PyObject **> self.data * for i in range(self.len / itemsize): * p[i] = Py_None # <<<<<<<<<<<<<< * Py_INCREF(Py_None) * */ (__pyx_v_p[__pyx_v_i]) = Py_None; /* "View.MemoryView":178 * for i in range(self.len / itemsize): * p[i] = Py_None * Py_INCREF(Py_None) # <<<<<<<<<<<<<< * * @cname('getbuffer') */ Py_INCREF(Py_None); } /* "View.MemoryView":174 * raise MemoryError("unable to allocate array data.") * * if self.dtype_is_object: # <<<<<<<<<<<<<< * p = <PyObject **> self.data * for i in range(self.len / itemsize): */ } /* "View.MemoryView":167 * self.free_data = allocate_buffer * self.dtype_is_object = format == b'O' * if allocate_buffer: # <<<<<<<<<<<<<< * * */ } /* "View.MemoryView":118 * cdef bint dtype_is_object * * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< * mode="c", bint allocate_buffer=True): * */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_9); __Pyx_XDECREF(__pyx_t_10); __Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_XDECREF(__pyx_v_format); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":181 * * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< * cdef int bufmode = -1 * if self.mode == u"c": */ /* Python wrapper */ static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(((struct __pyx_array_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_v_bufmode; int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; char *__pyx_t_4; Py_ssize_t __pyx_t_5; int __pyx_t_6; Py_ssize_t *__pyx_t_7; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__getbuffer__", 0); if (__pyx_v_info != NULL) { __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); __Pyx_GIVEREF(__pyx_v_info->obj); } /* "View.MemoryView":182 * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): * cdef int bufmode = -1 # <<<<<<<<<<<<<< * if self.mode == u"c": * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS */ __pyx_v_bufmode = -1; /* "View.MemoryView":183 * def __getbuffer__(self, Py_buffer *info, int flags): * cdef int bufmode = -1 * if self.mode == u"c": # <<<<<<<<<<<<<< * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": */ __pyx_t_1 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_c, Py_EQ)); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 183; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* 
"View.MemoryView":184 * cdef int bufmode = -1 * if self.mode == u"c": * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<< * elif self.mode == u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS */ __pyx_v_bufmode = (PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS); /* "View.MemoryView":183 * def __getbuffer__(self, Py_buffer *info, int flags): * cdef int bufmode = -1 * if self.mode == u"c": # <<<<<<<<<<<<<< * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": */ goto __pyx_L3; } /* "View.MemoryView":185 * if self.mode == u"c": * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": # <<<<<<<<<<<<<< * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): */ __pyx_t_2 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_fortran, Py_EQ)); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 185; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_1 = (__pyx_t_2 != 0); if (__pyx_t_1) { /* "View.MemoryView":186 * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<< * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") */ __pyx_v_bufmode = (PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS); /* "View.MemoryView":185 * if self.mode == u"c": * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": # <<<<<<<<<<<<<< * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): */ } __pyx_L3:; /* "View.MemoryView":187 * elif self.mode == u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): # <<<<<<<<<<<<<< * raise ValueError("Can only create a buffer that is contiguous in memory.") * info.buf = self.data */ __pyx_t_1 = ((!((__pyx_v_flags & __pyx_v_bufmode) != 0)) != 0); if (__pyx_t_1) { /* "View.MemoryView":188 * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") # <<<<<<<<<<<<<< * info.buf = self.data * info.len = self.len */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__6, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 188; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 188; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "View.MemoryView":187 * elif self.mode == u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): # <<<<<<<<<<<<<< * raise ValueError("Can only create a buffer that is contiguous in memory.") * info.buf = self.data */ } /* "View.MemoryView":189 * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") * info.buf = self.data # <<<<<<<<<<<<<< * info.len = self.len * info.ndim = self.ndim */ __pyx_t_4 = __pyx_v_self->data; __pyx_v_info->buf = __pyx_t_4; /* "View.MemoryView":190 * raise ValueError("Can only create a buffer that is contiguous in memory.") * info.buf = self.data * info.len = self.len # <<<<<<<<<<<<<< * info.ndim = self.ndim * info.shape = self._shape */ __pyx_t_5 = __pyx_v_self->len; __pyx_v_info->len = __pyx_t_5; /* "View.MemoryView":191 * info.buf = self.data * info.len = self.len * info.ndim = self.ndim # 
<<<<<<<<<<<<<< * info.shape = self._shape * info.strides = self._strides */ __pyx_t_6 = __pyx_v_self->ndim; __pyx_v_info->ndim = __pyx_t_6; /* "View.MemoryView":192 * info.len = self.len * info.ndim = self.ndim * info.shape = self._shape # <<<<<<<<<<<<<< * info.strides = self._strides * info.suboffsets = NULL */ __pyx_t_7 = __pyx_v_self->_shape; __pyx_v_info->shape = __pyx_t_7; /* "View.MemoryView":193 * info.ndim = self.ndim * info.shape = self._shape * info.strides = self._strides # <<<<<<<<<<<<<< * info.suboffsets = NULL * info.itemsize = self.itemsize */ __pyx_t_7 = __pyx_v_self->_strides; __pyx_v_info->strides = __pyx_t_7; /* "View.MemoryView":194 * info.shape = self._shape * info.strides = self._strides * info.suboffsets = NULL # <<<<<<<<<<<<<< * info.itemsize = self.itemsize * info.readonly = 0 */ __pyx_v_info->suboffsets = NULL; /* "View.MemoryView":195 * info.strides = self._strides * info.suboffsets = NULL * info.itemsize = self.itemsize # <<<<<<<<<<<<<< * info.readonly = 0 * */ __pyx_t_5 = __pyx_v_self->itemsize; __pyx_v_info->itemsize = __pyx_t_5; /* "View.MemoryView":196 * info.suboffsets = NULL * info.itemsize = self.itemsize * info.readonly = 0 # <<<<<<<<<<<<<< * * if flags & PyBUF_FORMAT: */ __pyx_v_info->readonly = 0; /* "View.MemoryView":198 * info.readonly = 0 * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * info.format = self.format * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); if (__pyx_t_1) { /* "View.MemoryView":199 * * if flags & PyBUF_FORMAT: * info.format = self.format # <<<<<<<<<<<<<< * else: * info.format = NULL */ __pyx_t_4 = __pyx_v_self->format; __pyx_v_info->format = __pyx_t_4; /* "View.MemoryView":198 * info.readonly = 0 * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * info.format = self.format * else: */ goto __pyx_L5; } /* "View.MemoryView":201 * info.format = self.format * else: * info.format = NULL # <<<<<<<<<<<<<< * * info.obj = self */ /*else*/ { __pyx_v_info->format = NULL; } __pyx_L5:; /* "View.MemoryView":203 * info.format = NULL * * info.obj = self # <<<<<<<<<<<<<< * * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") */ __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = ((PyObject *)__pyx_v_self); /* "View.MemoryView":181 * * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< * cdef int bufmode = -1 * if self.mode == u"c": */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.array.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; if (__pyx_v_info != NULL && __pyx_v_info->obj != NULL) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = NULL; } goto __pyx_L2; __pyx_L0:; if (__pyx_v_info != NULL && __pyx_v_info->obj == Py_None) { __Pyx_GOTREF(Py_None); __Pyx_DECREF(Py_None); __pyx_v_info->obj = NULL; } __pyx_L2:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":207 * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") * * def __dealloc__(array self): # <<<<<<<<<<<<<< * if self.callback_free_data != NULL: * self.callback_free_data(self.data) */ /* Python wrapper */ static void __pyx_array___dealloc__(PyObject *__pyx_v_self); /*proto*/ static void __pyx_array___dealloc__(PyObject *__pyx_v_self) { __Pyx_RefNannyDeclarations 
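/*
 * __getbuffer__ above is a textbook PEP 3118 exporter: refuse any request
 * whose flags lack the contiguity bit matching self.mode, then publish the
 * data pointer, byte length, ndim, shape, strides, itemsize, and a format
 * string only when PyBUF_FORMAT was asked for. Condensed sketch of the
 * field-filling step (export_fields is hypothetical; the generated code
 * additionally manages the info->obj reference):
 *
 *     static int export_fields(Py_buffer *info, char *data, Py_ssize_t len,
 *                              int ndim, Py_ssize_t *shape,
 *                              Py_ssize_t *strides, Py_ssize_t itemsize,
 *                              char *format, int flags)
 *     {
 *         info->buf = data;
 *         info->len = len;
 *         info->ndim = ndim;
 *         info->shape = shape;
 *         info->strides = strides;
 *         info->suboffsets = NULL;     // no indirect dimensions
 *         info->itemsize = itemsize;
 *         info->readonly = 0;
 *         info->format = (flags & PyBUF_FORMAT) ? format : NULL;
 *         return 0;
 *     }
 */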
__Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(((struct __pyx_array_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self) { __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("__dealloc__", 0); /* "View.MemoryView":208 * * def __dealloc__(array self): * if self.callback_free_data != NULL: # <<<<<<<<<<<<<< * self.callback_free_data(self.data) * elif self.free_data: */ __pyx_t_1 = ((__pyx_v_self->callback_free_data != NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":209 * def __dealloc__(array self): * if self.callback_free_data != NULL: * self.callback_free_data(self.data) # <<<<<<<<<<<<<< * elif self.free_data: * if self.dtype_is_object: */ __pyx_v_self->callback_free_data(__pyx_v_self->data); /* "View.MemoryView":208 * * def __dealloc__(array self): * if self.callback_free_data != NULL: # <<<<<<<<<<<<<< * self.callback_free_data(self.data) * elif self.free_data: */ goto __pyx_L3; } /* "View.MemoryView":210 * if self.callback_free_data != NULL: * self.callback_free_data(self.data) * elif self.free_data: # <<<<<<<<<<<<<< * if self.dtype_is_object: * refcount_objects_in_slice(self.data, self._shape, */ __pyx_t_1 = (__pyx_v_self->free_data != 0); if (__pyx_t_1) { /* "View.MemoryView":211 * self.callback_free_data(self.data) * elif self.free_data: * if self.dtype_is_object: # <<<<<<<<<<<<<< * refcount_objects_in_slice(self.data, self._shape, * self._strides, self.ndim, False) */ __pyx_t_1 = (__pyx_v_self->dtype_is_object != 0); if (__pyx_t_1) { /* "View.MemoryView":212 * elif self.free_data: * if self.dtype_is_object: * refcount_objects_in_slice(self.data, self._shape, # <<<<<<<<<<<<<< * self._strides, self.ndim, False) * free(self.data) */ __pyx_memoryview_refcount_objects_in_slice(__pyx_v_self->data, __pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_self->ndim, 0); /* "View.MemoryView":211 * self.callback_free_data(self.data) * elif self.free_data: * if self.dtype_is_object: # <<<<<<<<<<<<<< * refcount_objects_in_slice(self.data, self._shape, * self._strides, self.ndim, False) */ } /* "View.MemoryView":214 * refcount_objects_in_slice(self.data, self._shape, * self._strides, self.ndim, False) * free(self.data) # <<<<<<<<<<<<<< * PyMem_Free(self._shape) * */ free(__pyx_v_self->data); /* "View.MemoryView":210 * if self.callback_free_data != NULL: * self.callback_free_data(self.data) * elif self.free_data: # <<<<<<<<<<<<<< * if self.dtype_is_object: * refcount_objects_in_slice(self.data, self._shape, */ } __pyx_L3:; /* "View.MemoryView":215 * self._strides, self.ndim, False) * free(self.data) * PyMem_Free(self._shape) # <<<<<<<<<<<<<< * * property memview: */ PyMem_Free(__pyx_v_self->_shape); /* "View.MemoryView":207 * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") * * def __dealloc__(array self): # <<<<<<<<<<<<<< * if self.callback_free_data != NULL: * self.callback_free_data(self.data) */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":219 * property memview: * @cname('get_memview') * def __get__(self): # <<<<<<<<<<<<<< * * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE */ /* Python wrapper */ static PyObject *get_memview(PyObject *__pyx_v_self); /*proto*/ static PyObject *get_memview(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations 
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_5array_7memview___get__(((struct __pyx_array_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self) { int __pyx_v_flags; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":221 * def __get__(self): * * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE # <<<<<<<<<<<<<< * return memoryview(self, flags, self.dtype_is_object) * */ __pyx_v_flags = ((PyBUF_ANY_CONTIGUOUS | PyBUF_FORMAT) | PyBUF_WRITABLE); /* "View.MemoryView":222 * * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE * return memoryview(self, flags, self.dtype_is_object) # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 222; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 222; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 222; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 222; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":219 * property memview: * @cname('get_memview') * def __get__(self): # <<<<<<<<<<<<<< * * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.array.memview.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":225 * * * def __getattr__(self, attr): # <<<<<<<<<<<<<< * return getattr(self.memview, attr) * */ /* Python wrapper */ static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr); /*proto*/ static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getattr__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__getattr__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_attr)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__getattr__(struct 
__pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__getattr__", 0); /* "View.MemoryView":226 * * def __getattr__(self, attr): * return getattr(self.memview, attr) # <<<<<<<<<<<<<< * * def __getitem__(self, item): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 226; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_GetAttr(__pyx_t_1, __pyx_v_attr); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 226; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":225 * * * def __getattr__(self, attr): # <<<<<<<<<<<<<< * return getattr(self.memview, attr) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.array.__getattr__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":228 * return getattr(self.memview, attr) * * def __getitem__(self, item): # <<<<<<<<<<<<<< * return self.memview[item] * */ /* Python wrapper */ static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item); /*proto*/ static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__getitem__", 0); /* "View.MemoryView":229 * * def __getitem__(self, item): * return self.memview[item] # <<<<<<<<<<<<<< * * def __setitem__(self, item, value): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 229; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyObject_GetItem(__pyx_t_1, __pyx_v_item); if (unlikely(__pyx_t_2 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 229; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":228 * return getattr(self.memview, attr) * * def __getitem__(self, item): # <<<<<<<<<<<<<< * return self.memview[item] * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.array.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; 
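/*
 * __getattr__ above (and __getitem__/__setitem__ around it) simply forward
 * to the memoryview exposed by the memview property, making cython.array a
 * thin allocator whose indexing semantics are entirely the memoryview's.
 * The same delegation in plain C API terms (delegate_getitem condenses the
 * generated wrapper; the real code uses the interned __pyx_n_s_memview
 * rather than a C string):
 *
 *     static PyObject *delegate_getitem(PyObject *self, PyObject *item)
 *     {
 *         PyObject *mv = PyObject_GetAttrString(self, "memview");
 *         if (!mv) return NULL;
 *         PyObject *res = PyObject_GetItem(mv, item);   // mv[item]
 *         Py_DECREF(mv);
 *         return res;
 *     }
 */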
__Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":231 * return self.memview[item] * * def __setitem__(self, item, value): # <<<<<<<<<<<<<< * self.memview[item] = value * */ /* Python wrapper */ static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /*proto*/ static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__setitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item), ((PyObject *)__pyx_v_value)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__setitem__", 0); /* "View.MemoryView":232 * * def __setitem__(self, item, value): * self.memview[item] = value # <<<<<<<<<<<<<< * * */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 232; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (unlikely(PyObject_SetItem(__pyx_t_1, __pyx_v_item, __pyx_v_value) < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 232; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":231 * return self.memview[item] * * def __setitem__(self, item, value): # <<<<<<<<<<<<<< * self.memview[item] = value * */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.array.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":236 * * @cname("__pyx_array_new") * cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, # <<<<<<<<<<<<<< * char *mode, char *buf): * cdef array result */ static struct __pyx_array_obj *__pyx_array_new(PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, char *__pyx_v_format, char *__pyx_v_mode, char *__pyx_v_buf) { struct __pyx_array_obj *__pyx_v_result = 0; struct __pyx_array_obj *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("array_cwrapper", 0); /* "View.MemoryView":240 * cdef array result * * if buf == NULL: # <<<<<<<<<<<<<< * result = array(shape, itemsize, format, mode.decode('ASCII')) * else: */ __pyx_t_1 = ((__pyx_v_buf == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":241 * * if buf == NULL: * result = array(shape, itemsize, format, mode.decode('ASCII')) # <<<<<<<<<<<<<< * else: * result = array(shape, itemsize, format, mode.decode('ASCII'), */ __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 241; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyBytes_FromString(__pyx_v_format); if 
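/*
 * array_cwrapper (__pyx_array_new, begun above) has two allocation modes:
 * with buf == NULL the array constructor mallocs its own storage, while a
 * non-NULL buf is passed through with allocate_buffer=False and then
 * installed as result.data, so the array wraps memory it does not own (and
 * will not free, since free_data stays False). The ownership split in
 * miniature (wrap_or_alloc is illustrative):
 *
 *     #include <stdlib.h>
 *
 *     struct buf_owner { char *data; int owns; };
 *
 *     static int wrap_or_alloc(struct buf_owner *a, size_t nbytes, char *buf)
 *     {
 *         if (buf == NULL) {
 *             a->data = malloc(nbytes);   // array owns and later frees
 *             a->owns = 1;
 *             return a->data ? 0 : -1;
 *         }
 *         a->data = buf;                  // caller keeps ownership
 *         a->owns = 0;
 *         return 0;
 *     }
 */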
    __pyx_t_3 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 241; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_4 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 241; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = PyTuple_New(4); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 241; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_INCREF(__pyx_v_shape);
    __Pyx_GIVEREF(__pyx_v_shape);
    PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_shape);
    __Pyx_GIVEREF(__pyx_t_2);
    PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_2);
    __Pyx_GIVEREF(__pyx_t_3);
    PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_t_3);
    __Pyx_GIVEREF(__pyx_t_4);
    PyTuple_SET_ITEM(__pyx_t_5, 3, __pyx_t_4);
    __pyx_t_2 = 0; __pyx_t_3 = 0; __pyx_t_4 = 0;
    __pyx_t_4 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_t_5, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 241; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_4);
    __pyx_t_4 = 0;

    /* "View.MemoryView":240
 *     cdef array result
 *
 *     if buf == NULL:             # <<<<<<<<<<<<<<
 *         result = array(shape, itemsize, format, mode.decode('ASCII'))
 *     else:
 */
    goto __pyx_L3;
  }

  /* "View.MemoryView":243
 *         result = array(shape, itemsize, format, mode.decode('ASCII'))
 *     else:
 *         result = array(shape, itemsize, format, mode.decode('ASCII'),             # <<<<<<<<<<<<<<
 *                        allocate_buffer=False)
 *         result.data = buf
 */
  /*else*/ {
    __pyx_t_4 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 243; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 243; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_3 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 243; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_2 = PyTuple_New(4); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 243; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_GOTREF(__pyx_t_2);
    __Pyx_INCREF(__pyx_v_shape);
    __Pyx_GIVEREF(__pyx_v_shape);
    PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_shape);
    __Pyx_GIVEREF(__pyx_t_4);
    PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_4);
    __Pyx_GIVEREF(__pyx_t_5);
    PyTuple_SET_ITEM(__pyx_t_2, 2, __pyx_t_5);
    __Pyx_GIVEREF(__pyx_t_3);
    PyTuple_SET_ITEM(__pyx_t_2, 3, __pyx_t_3);
    __pyx_t_4 = 0; __pyx_t_5 = 0; __pyx_t_3 = 0;

    /* "View.MemoryView":244
 *     else:
 *         result = array(shape, itemsize, format, mode.decode('ASCII'),
 *                        allocate_buffer=False)             # <<<<<<<<<<<<<<
 *         result.data = buf
 *
 */
    __pyx_t_3 = PyDict_New(); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 244; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_GOTREF(__pyx_t_3);
    if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_allocate_buffer, Py_False) < 0) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 244; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    /* "View.MemoryView":243
 *         result = array(shape, itemsize, format, mode.decode('ASCII'))
 *     else:
 *         result = array(shape, itemsize, format, mode.decode('ASCII'),             # <<<<<<<<<<<<<<
 *                        allocate_buffer=False)
 *         result.data = buf
 */
    __pyx_t_5 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 243; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_5);
    __pyx_t_5 = 0;

    /* "View.MemoryView":245
 *         result = array(shape, itemsize, format, mode.decode('ASCII'),
 *                        allocate_buffer=False)
 *         result.data = buf             # <<<<<<<<<<<<<<
 *
 *     return result
 */
    __pyx_v_result->data = __pyx_v_buf;
  }
  __pyx_L3:;

  /* "View.MemoryView":247
 *         result.data = buf
 *
 *     return result             # <<<<<<<<<<<<<<
 *
 *
 */
  __Pyx_XDECREF(((PyObject *)__pyx_r));
  __Pyx_INCREF(((PyObject *)__pyx_v_result));
  __pyx_r = __pyx_v_result;
  goto __pyx_L0;

  /* "View.MemoryView":236
 *
 * @cname("__pyx_array_new")
 * cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format,             # <<<<<<<<<<<<<<
 *                           char *mode, char *buf):
 *     cdef array result
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("View.MemoryView.array_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_result);
  __Pyx_XGIVEREF((PyObject *)__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
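/* NOTE: illustrative summary of array_cwrapper's two construction paths:
 *   buf == NULL -> array(shape, itemsize, format, mode)          (allocates)
 *   buf != NULL -> array(..., allocate_buffer=False); result.data = buf
 * i.e. a non-NULL buf wraps existing memory without copying it.
 * (Explanatory comment, not part of Cython's generated output.) */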
/* "View.MemoryView":273
 * cdef class Enum(object):
 *     cdef object name
 *     def __init__(self, name):             # <<<<<<<<<<<<<<
 *         self.name = name
 *     def __repr__(self):
 */

/* Python wrapper */
static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  PyObject *__pyx_v_name = 0;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  {
    static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_name,0};
    PyObject* values[1] = {0};
    if (unlikely(__pyx_kwds)) {
      Py_ssize_t kw_args;
      const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
      switch (pos_args) {
        case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
        case 0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      kw_args = PyDict_Size(__pyx_kwds);
      switch (pos_args) {
        case 0:
        if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_name)) != 0)) kw_args--;
        else goto __pyx_L5_argtuple_error;
      }
      if (unlikely(kw_args > 0)) {
        if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__init__") < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 273; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
      }
    } else if (PyTuple_GET_SIZE(__pyx_args) != 1) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
    }
    __pyx_v_name = values[0];
  }
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__init__", 1, 1, 1, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 273; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
  __pyx_L3_error:;
  __Pyx_AddTraceback("View.MemoryView.Enum.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return -1;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), __pyx_v_name);

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "View.MemoryView":274
 *     cdef object name
 *     def __init__(self, name):
 *         self.name = name             # <<<<<<<<<<<<<<
 *     def __repr__(self):
 *         return self.name
 */
  __Pyx_INCREF(__pyx_v_name);
  __Pyx_GIVEREF(__pyx_v_name);
  __Pyx_GOTREF(__pyx_v_self->name);
  __Pyx_DECREF(__pyx_v_self->name);
  __pyx_v_self->name = __pyx_v_name;

  /* "View.MemoryView":273
 * cdef class Enum(object):
 *     cdef object name
 *     def __init__(self, name):             # <<<<<<<<<<<<<<
 *         self.name = name
 *     def __repr__(self):
 */

  /* function exit code */
  __pyx_r = 0;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "View.MemoryView":275
 *     def __init__(self, name):
 *         self.name = name
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return self.name
 *
 */

/* Python wrapper */
static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self) {
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  __pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "View.MemoryView":276
 *         self.name = name
 *     def __repr__(self):
 *         return self.name             # <<<<<<<<<<<<<<
 *
 * cdef generic = Enum("<strided and direct or indirect>")
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF(__pyx_v_self->name);
  __pyx_r = __pyx_v_self->name;
  goto __pyx_L0;

  /* "View.MemoryView":275
 *     def __init__(self, name):
 *         self.name = name
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return self.name
 *
 */

  /* function exit code */
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
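/* NOTE: Enum here is a minimal named-sentinel class: __init__ stores a name
 * and __repr__ returns it. Judging from the quoted source (cdef generic =
 * Enum("<strided and direct or indirect>")), instances serve as unique,
 * printable markers for memory-layout kinds. (Explanatory comment, not part
 * of Cython's generated output.) */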
"View.MemoryView":298 * offset = aligned_p % alignment * * if offset > 0: # <<<<<<<<<<<<<< * aligned_p += alignment - offset * */ } /* "View.MemoryView":301 * aligned_p += alignment - offset * * return <void *> aligned_p # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview') */ __pyx_r = ((void *)__pyx_v_aligned_p); goto __pyx_L0; /* "View.MemoryView":290 * * @cname('__pyx_align_pointer') * cdef void *align_pointer(void *memory, size_t alignment) nogil: # <<<<<<<<<<<<<< * "Align pointer memory on a given boundary" * cdef Py_intptr_t aligned_p = <Py_intptr_t> memory */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":319 * cdef __Pyx_TypeInfo *typeinfo * * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<< * self.obj = obj * self.flags = flags */ /* Python wrapper */ static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_obj = 0; int __pyx_v_flags; int __pyx_v_dtype_is_object; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_obj,&__pyx_n_s_flags,&__pyx_n_s_dtype_is_object,0}; PyObject* values[3] = {0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_obj)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_flags)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, 1); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 319; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_dtype_is_object); if (value) { values[2] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 319; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_obj = values[0]; __pyx_v_flags = __Pyx_PyInt_As_int(values[1]); if (unlikely((__pyx_v_flags == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 319; __pyx_clineno = __LINE__; goto __pyx_L3_error;} if (values[2]) { __pyx_v_dtype_is_object = __Pyx_PyObject_IsTrue(values[2]); if (unlikely((__pyx_v_dtype_is_object == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 319; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } else { __pyx_v_dtype_is_object = ((int)0); } } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); 
/* "View.MemoryView":319
 *     cdef __Pyx_TypeInfo *typeinfo
 *
 *     def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False):             # <<<<<<<<<<<<<<
 *         self.obj = obj
 *         self.flags = flags
 */

/* Python wrapper */
static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  PyObject *__pyx_v_obj = 0;
  int __pyx_v_flags;
  int __pyx_v_dtype_is_object;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0);
  {
    static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_obj,&__pyx_n_s_flags,&__pyx_n_s_dtype_is_object,0};
    PyObject* values[3] = {0,0,0};
    if (unlikely(__pyx_kwds)) {
      Py_ssize_t kw_args;
      const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
      switch (pos_args) {
        case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
        case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
        case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
        case 0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      kw_args = PyDict_Size(__pyx_kwds);
      switch (pos_args) {
        case 0:
        if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_obj)) != 0)) kw_args--;
        else goto __pyx_L5_argtuple_error;
        case 1:
        if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_flags)) != 0)) kw_args--;
        else {
          __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, 1); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 319; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
        }
        case 2:
        if (kw_args > 0) {
          PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_dtype_is_object);
          if (value) { values[2] = value; kw_args--; }
        }
      }
      if (unlikely(kw_args > 0)) {
        if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 319; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
      }
    } else {
      switch (PyTuple_GET_SIZE(__pyx_args)) {
        case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
        case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
        values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
        break;
        default: goto __pyx_L5_argtuple_error;
      }
    }
    __pyx_v_obj = values[0];
    __pyx_v_flags = __Pyx_PyInt_As_int(values[1]); if (unlikely((__pyx_v_flags == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 319; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
    if (values[2]) {
      __pyx_v_dtype_is_object = __Pyx_PyObject_IsTrue(values[2]); if (unlikely((__pyx_v_dtype_is_object == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 319; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
    } else {
      __pyx_v_dtype_is_object = ((int)0);
    }
  }
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 319; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
  __pyx_L3_error:;
  __Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return -1;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_obj, __pyx_v_flags, __pyx_v_dtype_is_object);

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  int __pyx_t_3;
  int __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__cinit__", 0);

  /* "View.MemoryView":320
 *
 *     def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False):
 *         self.obj = obj             # <<<<<<<<<<<<<<
 *         self.flags = flags
 *         if type(self) is memoryview or obj is not None:
 */
  __Pyx_INCREF(__pyx_v_obj);
  __Pyx_GIVEREF(__pyx_v_obj);
  __Pyx_GOTREF(__pyx_v_self->obj);
  __Pyx_DECREF(__pyx_v_self->obj);
  __pyx_v_self->obj = __pyx_v_obj;

  /* "View.MemoryView":321
 *     def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False):
 *         self.obj = obj
 *         self.flags = flags             # <<<<<<<<<<<<<<
 *         if type(self) is memoryview or obj is not None:
 *             __Pyx_GetBuffer(obj, &self.view, flags)
 */
  __pyx_v_self->flags = __pyx_v_flags;

  /* "View.MemoryView":322
 *         self.obj = obj
 *         self.flags = flags
 *         if type(self) is memoryview or obj is not None:             # <<<<<<<<<<<<<<
 *             __Pyx_GetBuffer(obj, &self.view, flags)
 *             if <PyObject *> self.view.obj == NULL:
 */
  __pyx_t_2 = (((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))) == ((PyObject *)__pyx_memoryview_type));
  __pyx_t_3 = (__pyx_t_2 != 0);
  if (!__pyx_t_3) {
  } else {
    __pyx_t_1 = __pyx_t_3;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_3 = (__pyx_v_obj != Py_None);
  __pyx_t_2 = (__pyx_t_3 != 0);
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {

    /* "View.MemoryView":323
 *         self.flags = flags
 *         if type(self) is memoryview or obj is not None:
 *             __Pyx_GetBuffer(obj, &self.view, flags)             # <<<<<<<<<<<<<<
 *             if <PyObject *> self.view.obj == NULL:
 *                 (<__pyx_buffer *> &self.view).obj = Py_None
 */
    __pyx_t_4 = __Pyx_GetBuffer(__pyx_v_obj, (&__pyx_v_self->view), __pyx_v_flags); if (unlikely(__pyx_t_4 == -1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 323; __pyx_clineno = __LINE__; goto __pyx_L1_error;}

    /* "View.MemoryView":324
 *         if type(self) is memoryview or obj is not None:
 *             __Pyx_GetBuffer(obj, &self.view, flags)
 *             if <PyObject *> self.view.obj == NULL:             # <<<<<<<<<<<<<<
 *                 (<__pyx_buffer *> &self.view).obj = Py_None
 *                 Py_INCREF(Py_None)
 */
    __pyx_t_1 = ((((PyObject *)__pyx_v_self->view.obj) == NULL) != 0);
    if (__pyx_t_1) {

      /* "View.MemoryView":325
 *             __Pyx_GetBuffer(obj, &self.view, flags)
 *             if <PyObject *> self.view.obj == NULL:
 *                 (<__pyx_buffer *> &self.view).obj = Py_None             # <<<<<<<<<<<<<<
 *                 Py_INCREF(Py_None)
 *
 */
      ((Py_buffer *)(&__pyx_v_self->view))->obj = Py_None;

      /* "View.MemoryView":326
 *             if <PyObject *> self.view.obj == NULL:
 *                 (<__pyx_buffer *> &self.view).obj = Py_None
 *                 Py_INCREF(Py_None)             # <<<<<<<<<<<<<<
 *
 *         self.lock = PyThread_allocate_lock()
 */
      Py_INCREF(Py_None);
      /* "View.MemoryView":324
 *         if type(self) is memoryview or obj is not None:
 *             __Pyx_GetBuffer(obj, &self.view, flags)
 *             if <PyObject *> self.view.obj == NULL:             # <<<<<<<<<<<<<<
 *                 (<__pyx_buffer *> &self.view).obj = Py_None
 *                 Py_INCREF(Py_None)
 */
    }

    /* "View.MemoryView":322
 *         self.obj = obj
 *         self.flags = flags
 *         if type(self) is memoryview or obj is not None:             # <<<<<<<<<<<<<<
 *             __Pyx_GetBuffer(obj, &self.view, flags)
 *             if <PyObject *> self.view.obj == NULL:
 */
  }

  /* "View.MemoryView":328
 *                 Py_INCREF(Py_None)
 *
 *         self.lock = PyThread_allocate_lock()             # <<<<<<<<<<<<<<
 *         if self.lock == NULL:
 *             raise MemoryError
 */
  __pyx_v_self->lock = PyThread_allocate_lock();

  /* "View.MemoryView":329
 *
 *         self.lock = PyThread_allocate_lock()
 *         if self.lock == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError
 *
 */
  __pyx_t_1 = ((__pyx_v_self->lock == NULL) != 0);
  if (__pyx_t_1) {

    /* "View.MemoryView":330
 *         self.lock = PyThread_allocate_lock()
 *         if self.lock == NULL:
 *             raise MemoryError             # <<<<<<<<<<<<<<
 *
 *         if flags & PyBUF_FORMAT:
 */
    PyErr_NoMemory(); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 330; __pyx_clineno = __LINE__; goto __pyx_L1_error;}

    /* "View.MemoryView":329
 *
 *         self.lock = PyThread_allocate_lock()
 *         if self.lock == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError
 *
 */
  }

  /* "View.MemoryView":332
 *             raise MemoryError
 *
 *         if flags & PyBUF_FORMAT:             # <<<<<<<<<<<<<<
 *             self.dtype_is_object = self.view.format == b'O'
 *         else:
 */
  __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0);
  if (__pyx_t_1) {

    /* "View.MemoryView":333
 *
 *         if flags & PyBUF_FORMAT:
 *             self.dtype_is_object = self.view.format == b'O'             # <<<<<<<<<<<<<<
 *         else:
 *             self.dtype_is_object = dtype_is_object
 */
    __pyx_t_5 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 333; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_6 = PyObject_RichCompare(__pyx_t_5, __pyx_n_b_O, Py_EQ); __Pyx_XGOTREF(__pyx_t_6); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 333; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_6); if (unlikely((__pyx_t_1 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 333; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __pyx_v_self->dtype_is_object = __pyx_t_1;

    /* "View.MemoryView":332
 *             raise MemoryError
 *
 *         if flags & PyBUF_FORMAT:             # <<<<<<<<<<<<<<
 *             self.dtype_is_object = self.view.format == b'O'
 *         else:
 */
    goto __pyx_L8;
  }

  /* "View.MemoryView":335
 *             self.dtype_is_object = self.view.format == b'O'
 *         else:
 *             self.dtype_is_object = dtype_is_object             # <<<<<<<<<<<<<<
 *
 *         self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer(
 */
  /*else*/ {
    __pyx_v_self->dtype_is_object = __pyx_v_dtype_is_object;
  }
  __pyx_L8:;

  /* "View.MemoryView":337
 *             self.dtype_is_object = dtype_is_object
 *
 *         self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer(             # <<<<<<<<<<<<<<
 *             <void *> &self.acquisition_count[0], sizeof(__pyx_atomic_int))
 *         self.typeinfo = NULL
 */
  __pyx_v_self->acquisition_count_aligned_p = ((__pyx_atomic_int *)__pyx_align_pointer(((void *)(&(__pyx_v_self->acquisition_count[0]))), (sizeof(__pyx_atomic_int))));

  /* "View.MemoryView":339
 *         self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer(
 *             <void *> &self.acquisition_count[0], sizeof(__pyx_atomic_int))
 *         self.typeinfo = NULL             # <<<<<<<<<<<<<<
 *
 *     def __dealloc__(memoryview self):
 */
  __pyx_v_self->typeinfo = NULL;
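/* NOTE: two easy-to-miss details in __cinit__ above: (1) if the buffer
 * exporter left view.obj unset, it is pointed at Py_None (with a matching
 * Py_INCREF) so later release code can treat view.obj uniformly; (2)
 * acquisition_count_aligned_p is derived via align_pointer, presumably so
 * the atomic acquisition counter sits at a properly aligned address.
 * (Explanatory comment, not part of Cython's generated output.) */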
"View.MemoryView":319 * cdef __Pyx_TypeInfo *typeinfo * * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<< * self.obj = obj * self.flags = flags */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":341 * self.typeinfo = NULL * * def __dealloc__(memoryview self): # <<<<<<<<<<<<<< * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) */ /* Python wrapper */ static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self); /*proto*/ static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; __Pyx_RefNannySetupContext("__dealloc__", 0); /* "View.MemoryView":342 * * def __dealloc__(memoryview self): * if self.obj is not None: # <<<<<<<<<<<<<< * __Pyx_ReleaseBuffer(&self.view) * */ __pyx_t_1 = (__pyx_v_self->obj != Py_None); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":343 * def __dealloc__(memoryview self): * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) # <<<<<<<<<<<<<< * * if self.lock != NULL: */ __Pyx_ReleaseBuffer((&__pyx_v_self->view)); /* "View.MemoryView":342 * * def __dealloc__(memoryview self): * if self.obj is not None: # <<<<<<<<<<<<<< * __Pyx_ReleaseBuffer(&self.view) * */ } /* "View.MemoryView":345 * __Pyx_ReleaseBuffer(&self.view) * * if self.lock != NULL: # <<<<<<<<<<<<<< * PyThread_free_lock(self.lock) * */ __pyx_t_2 = ((__pyx_v_self->lock != NULL) != 0); if (__pyx_t_2) { /* "View.MemoryView":346 * * if self.lock != NULL: * PyThread_free_lock(self.lock) # <<<<<<<<<<<<<< * * cdef char *get_item_pointer(memoryview self, object index) except NULL: */ PyThread_free_lock(__pyx_v_self->lock); /* "View.MemoryView":345 * __Pyx_ReleaseBuffer(&self.view) * * if self.lock != NULL: # <<<<<<<<<<<<<< * PyThread_free_lock(self.lock) * */ } /* "View.MemoryView":341 * self.typeinfo = NULL * * def __dealloc__(memoryview self): # <<<<<<<<<<<<<< * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":348 * PyThread_free_lock(self.lock) * * cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<< * cdef Py_ssize_t dim * cdef char *itemp = <char *> self.view.buf */ static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) { Py_ssize_t __pyx_v_dim; char *__pyx_v_itemp; PyObject *__pyx_v_idx = NULL; char *__pyx_r; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; PyObject *__pyx_t_2 = NULL; Py_ssize_t __pyx_t_3; PyObject *(*__pyx_t_4)(PyObject *); PyObject *__pyx_t_5 = NULL; Py_ssize_t __pyx_t_6; char *__pyx_t_7; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("get_item_pointer", 0); /* "View.MemoryView":350 * cdef char *get_item_pointer(memoryview self, object 
  /* "View.MemoryView":350
 *     cdef char *get_item_pointer(memoryview self, object index) except NULL:
 *         cdef Py_ssize_t dim
 *         cdef char *itemp = <char *> self.view.buf             # <<<<<<<<<<<<<<
 *
 *         for dim, idx in enumerate(index):
 */
  __pyx_v_itemp = ((char *)__pyx_v_self->view.buf);

  /* "View.MemoryView":352
 *         cdef char *itemp = <char *> self.view.buf
 *
 *         for dim, idx in enumerate(index):             # <<<<<<<<<<<<<<
 *             itemp = pybuffer_index(&self.view, itemp, idx, dim)
 *
 */
  __pyx_t_1 = 0;
  if (likely(PyList_CheckExact(__pyx_v_index)) || PyTuple_CheckExact(__pyx_v_index)) {
    __pyx_t_2 = __pyx_v_index; __Pyx_INCREF(__pyx_t_2); __pyx_t_3 = 0;
    __pyx_t_4 = NULL;
  } else {
    __pyx_t_3 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_v_index); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 352; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_t_4 = Py_TYPE(__pyx_t_2)->tp_iternext; if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 352; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  }
  for (;;) {
    if (likely(!__pyx_t_4)) {
      if (likely(PyList_CheckExact(__pyx_t_2))) {
        if (__pyx_t_3 >= PyList_GET_SIZE(__pyx_t_2)) break;
        #if CYTHON_COMPILING_IN_CPYTHON
        __pyx_t_5 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 352; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
        #else
        __pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 352; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
        __Pyx_GOTREF(__pyx_t_5);
        #endif
      } else {
        if (__pyx_t_3 >= PyTuple_GET_SIZE(__pyx_t_2)) break;
        #if CYTHON_COMPILING_IN_CPYTHON
        __pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 352; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
        #else
        __pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 352; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
        __Pyx_GOTREF(__pyx_t_5);
        #endif
      }
    } else {
      __pyx_t_5 = __pyx_t_4(__pyx_t_2);
      if (unlikely(!__pyx_t_5)) {
        PyObject* exc_type = PyErr_Occurred();
        if (exc_type) {
          if (likely(exc_type == PyExc_StopIteration || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
          else {__pyx_filename = __pyx_f[1]; __pyx_lineno = 352; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
        }
        break;
      }
      __Pyx_GOTREF(__pyx_t_5);
    }
    __Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_5);
    __pyx_t_5 = 0;
    __pyx_v_dim = __pyx_t_1;
    __pyx_t_1 = (__pyx_t_1 + 1);

    /* "View.MemoryView":353
 *
 *         for dim, idx in enumerate(index):
 *             itemp = pybuffer_index(&self.view, itemp, idx, dim)             # <<<<<<<<<<<<<<
 *
 *         return itemp
 */
    __pyx_t_6 = __Pyx_PyIndex_AsSsize_t(__pyx_v_idx); if (unlikely((__pyx_t_6 == (Py_ssize_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 353; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __pyx_t_7 = __pyx_pybuffer_index((&__pyx_v_self->view), __pyx_v_itemp, __pyx_t_6, __pyx_v_dim); if (unlikely(__pyx_t_7 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 353; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __pyx_v_itemp = __pyx_t_7;

    /* "View.MemoryView":352
 *         cdef char *itemp = <char *> self.view.buf
 *
 *         for dim, idx in enumerate(index):             # <<<<<<<<<<<<<<
 *             itemp = pybuffer_index(&self.view, itemp, idx, dim)
 *
 */
  }
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;

  /* "View.MemoryView":355
 *             itemp = pybuffer_index(&self.view, itemp, idx, dim)
 *
 *         return itemp             # <<<<<<<<<<<<<<
 *
 *
 */
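  /* NOTE: the loop above folds each index component into the running
   * pointer; for a 2-D view, view[i, j] resolves to roughly
   *     buf + i*strides[0] + j*strides[1]
   * -- a sketch only: pybuffer_index additionally handles negative indices,
   * bounds checks and suboffsets, which the sketch omits. (Explanatory
   * comment, not part of Cython's generated output.) */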
  __pyx_r = __pyx_v_itemp;
  goto __pyx_L0;

  /* "View.MemoryView":348
 *             PyThread_free_lock(self.lock)
 *
 *     cdef char *get_item_pointer(memoryview self, object index) except NULL:             # <<<<<<<<<<<<<<
 *         cdef Py_ssize_t dim
 *         cdef char *itemp = <char *> self.view.buf
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("View.MemoryView.memoryview.get_item_pointer", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_idx);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "View.MemoryView":358
 *
 *
 *     def __getitem__(memoryview self, object index):             # <<<<<<<<<<<<<<
 *         if index is Ellipsis:
 *             return self
 */

/* Python wrapper */
static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index); /*proto*/
static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index) {
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0);
  __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) {
  PyObject *__pyx_v_have_slices = NULL;
  PyObject *__pyx_v_indices = NULL;
  char *__pyx_v_itemp;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  char *__pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__getitem__", 0);

  /* "View.MemoryView":359
 *
 *     def __getitem__(memoryview self, object index):
 *         if index is Ellipsis:             # <<<<<<<<<<<<<<
 *             return self
 *
 */
  __pyx_t_1 = (__pyx_v_index == __pyx_builtin_Ellipsis);
  __pyx_t_2 = (__pyx_t_1 != 0);
  if (__pyx_t_2) {

    /* "View.MemoryView":360
 *     def __getitem__(memoryview self, object index):
 *         if index is Ellipsis:
 *             return self             # <<<<<<<<<<<<<<
 *
 *         have_slices, indices = _unellipsify(index, self.view.ndim)
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(((PyObject *)__pyx_v_self));
    __pyx_r = ((PyObject *)__pyx_v_self);
    goto __pyx_L0;

    /* "View.MemoryView":359
 *
 *     def __getitem__(memoryview self, object index):
 *         if index is Ellipsis:             # <<<<<<<<<<<<<<
 *             return self
 *
 */
  }

  /* "View.MemoryView":362
 *             return self
 *
 *         have_slices, indices = _unellipsify(index, self.view.ndim)             # <<<<<<<<<<<<<<
 *
 *         cdef char *itemp
 */
  __pyx_t_3 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 362; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_3);
  if (likely(__pyx_t_3 != Py_None)) {
    PyObject* sequence = __pyx_t_3;
    #if CYTHON_COMPILING_IN_CPYTHON
    Py_ssize_t size = Py_SIZE(sequence);
    #else
    Py_ssize_t size = PySequence_Size(sequence);
    #endif
    if (unlikely(size != 2)) {
      if (size > 2) __Pyx_RaiseTooManyValuesError(2);
      else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);
      {__pyx_filename = __pyx_f[1]; __pyx_lineno = 362; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    }
    #if CYTHON_COMPILING_IN_CPYTHON
    __pyx_t_4 = PyTuple_GET_ITEM(sequence, 0);
    __pyx_t_5 = PyTuple_GET_ITEM(sequence, 1);
    __Pyx_INCREF(__pyx_t_4);
    __Pyx_INCREF(__pyx_t_5);
    #else
    __pyx_t_4 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 362; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 362; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_GOTREF(__pyx_t_5);
    #endif
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  } else {
    __Pyx_RaiseNoneNotIterableError(); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 362; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  }
  __pyx_v_have_slices = __pyx_t_4;
  __pyx_t_4 = 0;
  __pyx_v_indices = __pyx_t_5;
  __pyx_t_5 = 0;

  /* "View.MemoryView":365
 *
 *         cdef char *itemp
 *         if have_slices:             # <<<<<<<<<<<<<<
 *             return memview_slice(self, indices)
 *         else:
 */
  __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 365; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  if (__pyx_t_2) {

    /* "View.MemoryView":366
 *         cdef char *itemp
 *         if have_slices:
 *             return memview_slice(self, indices)             # <<<<<<<<<<<<<<
 *         else:
 *             itemp = self.get_item_pointer(indices)
 */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_3 = ((PyObject *)__pyx_memview_slice(__pyx_v_self, __pyx_v_indices)); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 366; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_r = __pyx_t_3;
    __pyx_t_3 = 0;
    goto __pyx_L0;

    /* "View.MemoryView":365
 *
 *         cdef char *itemp
 *         if have_slices:             # <<<<<<<<<<<<<<
 *             return memview_slice(self, indices)
 *         else:
 */
  }

  /* "View.MemoryView":368
 *             return memview_slice(self, indices)
 *         else:
 *             itemp = self.get_item_pointer(indices)             # <<<<<<<<<<<<<<
 *             return self.convert_item_to_object(itemp)
 *
 */
  /*else*/ {
    __pyx_t_6 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_indices); if (unlikely(__pyx_t_6 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 368; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __pyx_v_itemp = __pyx_t_6;

    /* "View.MemoryView":369
 *         else:
 *             itemp = self.get_item_pointer(indices)
 *             return self.convert_item_to_object(itemp)             # <<<<<<<<<<<<<<
 *
 *     def __setitem__(memoryview self, object index, object value):
 */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->convert_item_to_object(__pyx_v_self, __pyx_v_itemp); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 369; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_r = __pyx_t_3;
    __pyx_t_3 = 0;
    goto __pyx_L0;
  }

  /* "View.MemoryView":358
 *
 *
 *     def __getitem__(memoryview self, object index):             # <<<<<<<<<<<<<<
 *         if index is Ellipsis:
 *             return self
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("View.MemoryView.memoryview.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_have_slices);
  __Pyx_XDECREF(__pyx_v_indices);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
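/* NOTE: memoryview.__getitem__ above has three outcomes: Ellipsis returns
 * self unchanged; an index containing slices goes through memview_slice and
 * yields a new memoryview; a fully scalar index is resolved to an element
 * pointer and converted back to a Python object. _unellipsify normalizes
 * '...' into explicit slices first. (Explanatory comment, not part of
 * Cython's generated output.) */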
/* "View.MemoryView":371
 *             return self.convert_item_to_object(itemp)
 *
 *     def __setitem__(memoryview self, object index, object value):             # <<<<<<<<<<<<<<
 *         have_slices, index = _unellipsify(index, self.view.ndim)
 *
 */

/* Python wrapper */
static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /*proto*/
static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index), ((PyObject *)__pyx_v_value));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) {
  PyObject *__pyx_v_have_slices = NULL;
  PyObject *__pyx_v_obj = NULL;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setitem__", 0);
  __Pyx_INCREF(__pyx_v_index);

  /* "View.MemoryView":372
 *
 *     def __setitem__(memoryview self, object index, object value):
 *         have_slices, index = _unellipsify(index, self.view.ndim)             # <<<<<<<<<<<<<<
 *
 *         if have_slices:
 */
  __pyx_t_1 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 372; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_1);
  if (likely(__pyx_t_1 != Py_None)) {
    PyObject* sequence = __pyx_t_1;
    #if CYTHON_COMPILING_IN_CPYTHON
    Py_ssize_t size = Py_SIZE(sequence);
    #else
    Py_ssize_t size = PySequence_Size(sequence);
    #endif
    if (unlikely(size != 2)) {
      if (size > 2) __Pyx_RaiseTooManyValuesError(2);
      else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);
      {__pyx_filename = __pyx_f[1]; __pyx_lineno = 372; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    }
    #if CYTHON_COMPILING_IN_CPYTHON
    __pyx_t_2 = PyTuple_GET_ITEM(sequence, 0);
    __pyx_t_3 = PyTuple_GET_ITEM(sequence, 1);
    __Pyx_INCREF(__pyx_t_2);
    __Pyx_INCREF(__pyx_t_3);
    #else
    __pyx_t_2 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 372; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_t_3 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 372; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_GOTREF(__pyx_t_3);
    #endif
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  } else {
    __Pyx_RaiseNoneNotIterableError(); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 372; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  }
  __pyx_v_have_slices = __pyx_t_2;
  __pyx_t_2 = 0;
  __Pyx_DECREF_SET(__pyx_v_index, __pyx_t_3);
  __pyx_t_3 = 0;

  /* "View.MemoryView":374
 *         have_slices, index = _unellipsify(index, self.view.ndim)
 *
 *         if have_slices:             # <<<<<<<<<<<<<<
 *             obj = self.is_slice(value)
 *             if obj:
 */
  __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_4 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 374; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  if (__pyx_t_4) {

    /* "View.MemoryView":375
 *
 *         if have_slices:
 *             obj = self.is_slice(value)             # <<<<<<<<<<<<<<
 *             if obj:
 *                 self.setitem_slice_assignment(self[index], obj)
 */
    __pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->is_slice(__pyx_v_self, __pyx_v_value); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 375; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_v_obj = __pyx_t_1;
    __pyx_t_1 = 0;
    /* "View.MemoryView":376
 *         if have_slices:
 *             obj = self.is_slice(value)
 *             if obj:             # <<<<<<<<<<<<<<
 *                 self.setitem_slice_assignment(self[index], obj)
 *             else:
 */
    __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_v_obj); if (unlikely(__pyx_t_4 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 376; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    if (__pyx_t_4) {

      /* "View.MemoryView":377
 *             obj = self.is_slice(value)
 *             if obj:
 *                 self.setitem_slice_assignment(self[index], obj)             # <<<<<<<<<<<<<<
 *             else:
 *                 self.setitem_slice_assign_scalar(self[index], value)
 */
      __pyx_t_1 = PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(__pyx_t_1 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 377; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
      __Pyx_GOTREF(__pyx_t_1);
      __pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assignment(__pyx_v_self, __pyx_t_1, __pyx_v_obj); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 377; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
      __Pyx_GOTREF(__pyx_t_3);
      __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;

      /* "View.MemoryView":376
 *         if have_slices:
 *             obj = self.is_slice(value)
 *             if obj:             # <<<<<<<<<<<<<<
 *                 self.setitem_slice_assignment(self[index], obj)
 *             else:
 */
      goto __pyx_L4;
    }

    /* "View.MemoryView":379
 *                 self.setitem_slice_assignment(self[index], obj)
 *             else:
 *                 self.setitem_slice_assign_scalar(self[index], value)             # <<<<<<<<<<<<<<
 *         else:
 *             self.setitem_indexed(index, value)
 */
    /*else*/ {
      __pyx_t_3 = PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(__pyx_t_3 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 379; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
      __Pyx_GOTREF(__pyx_t_3);
      if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 379; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
      __pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assign_scalar(__pyx_v_self, ((struct __pyx_memoryview_obj *)__pyx_t_3), __pyx_v_value); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 379; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
      __Pyx_GOTREF(__pyx_t_1);
      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    }
    __pyx_L4:;

    /* "View.MemoryView":374
 *         have_slices, index = _unellipsify(index, self.view.ndim)
 *
 *         if have_slices:             # <<<<<<<<<<<<<<
 *             obj = self.is_slice(value)
 *             if obj:
 */
    goto __pyx_L3;
  }

  /* "View.MemoryView":381
 *                 self.setitem_slice_assign_scalar(self[index], value)
 *         else:
 *             self.setitem_indexed(index, value)             # <<<<<<<<<<<<<<
 *
 *     cdef is_slice(self, obj):
 */
  /*else*/ {
    __pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_indexed(__pyx_v_self, __pyx_v_index, __pyx_v_value); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 381; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  }
  __pyx_L3:;

  /* "View.MemoryView":371
 *             return self.convert_item_to_object(itemp)
 *
 *     def __setitem__(memoryview self, object index, object value):             # <<<<<<<<<<<<<<
 *         have_slices, index = _unellipsify(index, self.view.ndim)
 *
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("View.MemoryView.memoryview.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_have_slices);
  __Pyx_XDECREF(__pyx_v_obj);
  __Pyx_XDECREF(__pyx_v_index);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
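/* NOTE: __setitem__ dispatch in summary: with slices in the index, the
 * value is first coerced by is_slice; on success an element-wise
 * slice-to-slice copy runs (setitem_slice_assignment), otherwise the value
 * is broadcast over the destination (setitem_slice_assign_scalar). A purely
 * scalar index goes through setitem_indexed. (Explanatory comment, not part
 * of Cython's generated output.) */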
/* "View.MemoryView":383
 *             self.setitem_indexed(index, value)
 *
 *     cdef is_slice(self, obj):             # <<<<<<<<<<<<<<
 *         if not isinstance(obj, memoryview):
 *             try:
 */

static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  int __pyx_t_9;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("is_slice", 0);
  __Pyx_INCREF(__pyx_v_obj);

  /* "View.MemoryView":384
 *
 *     cdef is_slice(self, obj):
 *         if not isinstance(obj, memoryview):             # <<<<<<<<<<<<<<
 *             try:
 *                 obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS,
 */
  __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_obj, __pyx_memoryview_type);
  __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0);
  if (__pyx_t_2) {

    /* "View.MemoryView":385
 *     cdef is_slice(self, obj):
 *         if not isinstance(obj, memoryview):
 *             try:             # <<<<<<<<<<<<<<
 *                 obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS,
 *                                  self.dtype_is_object)
 */
    {
      __Pyx_ExceptionSave(&__pyx_t_3, &__pyx_t_4, &__pyx_t_5);
      __Pyx_XGOTREF(__pyx_t_3);
      __Pyx_XGOTREF(__pyx_t_4);
      __Pyx_XGOTREF(__pyx_t_5);
      /*try:*/ {

        /* "View.MemoryView":386
 *         if not isinstance(obj, memoryview):
 *             try:
 *                 obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS,             # <<<<<<<<<<<<<<
 *                                  self.dtype_is_object)
 *             except TypeError:
 */
        __pyx_t_6 = __Pyx_PyInt_From_int((__pyx_v_self->flags | PyBUF_ANY_CONTIGUOUS)); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 386; __pyx_clineno = __LINE__; goto __pyx_L4_error;}
        __Pyx_GOTREF(__pyx_t_6);

        /* "View.MemoryView":387
 *             try:
 *                 obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS,
 *                                  self.dtype_is_object)             # <<<<<<<<<<<<<<
 *             except TypeError:
 *                 return None
 */
        __pyx_t_7 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 387; __pyx_clineno = __LINE__; goto __pyx_L4_error;}
        __Pyx_GOTREF(__pyx_t_7);

        /* "View.MemoryView":386
 *         if not isinstance(obj, memoryview):
 *             try:
 *                 obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS,             # <<<<<<<<<<<<<<
 *                                  self.dtype_is_object)
 *             except TypeError:
 */
        __pyx_t_8 = PyTuple_New(3); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 386; __pyx_clineno = __LINE__; goto __pyx_L4_error;}
        __Pyx_GOTREF(__pyx_t_8);
        __Pyx_INCREF(__pyx_v_obj);
        __Pyx_GIVEREF(__pyx_v_obj);
        PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_v_obj);
        __Pyx_GIVEREF(__pyx_t_6);
        PyTuple_SET_ITEM(__pyx_t_8, 1, __pyx_t_6);
        __Pyx_GIVEREF(__pyx_t_7);
        PyTuple_SET_ITEM(__pyx_t_8, 2, __pyx_t_7);
        __pyx_t_6 = 0; __pyx_t_7 = 0;
        __pyx_t_7 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_8, NULL); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 386; __pyx_clineno = __LINE__; goto __pyx_L4_error;}
        __Pyx_GOTREF(__pyx_t_7);
        __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
        __Pyx_DECREF_SET(__pyx_v_obj, __pyx_t_7);
        __pyx_t_7 = 0;

        /* "View.MemoryView":385
 *     cdef is_slice(self, obj):
 *         if not isinstance(obj, memoryview):
 *             try:             # <<<<<<<<<<<<<<
 *                 obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS,
 *                                  self.dtype_is_object)
 */
      }
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
      __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
      goto __pyx_L11_try_end;
      __pyx_L4_error:;
      __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
      __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0;
      __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;

      /* "View.MemoryView":388
 *                 obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS,
 *                                  self.dtype_is_object)
 *             except TypeError:             # <<<<<<<<<<<<<<
 *                 return None
 *
 */
      __pyx_t_9 = PyErr_ExceptionMatches(__pyx_builtin_TypeError);
      if (__pyx_t_9) {
        __Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename);
        if (__Pyx_GetException(&__pyx_t_7, &__pyx_t_8, &__pyx_t_6) < 0) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 388; __pyx_clineno = __LINE__; goto __pyx_L6_except_error;}
        __Pyx_GOTREF(__pyx_t_7);
        __Pyx_GOTREF(__pyx_t_8);
        __Pyx_GOTREF(__pyx_t_6);

        /* "View.MemoryView":389
 *                                  self.dtype_is_object)
 *             except TypeError:
 *                 return None             # <<<<<<<<<<<<<<
 *
 *         return obj
 */
        __Pyx_XDECREF(__pyx_r);
        __Pyx_INCREF(Py_None);
        __pyx_r = Py_None;
        __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
        __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
        __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
        goto __pyx_L7_except_return;
      }
      goto __pyx_L6_except_error;
      __pyx_L6_except_error:;

      /* "View.MemoryView":385
 *     cdef is_slice(self, obj):
 *         if not isinstance(obj, memoryview):
 *             try:             # <<<<<<<<<<<<<<
 *                 obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS,
 *                                  self.dtype_is_object)
 */
      __Pyx_XGIVEREF(__pyx_t_3);
      __Pyx_XGIVEREF(__pyx_t_4);
      __Pyx_XGIVEREF(__pyx_t_5);
      __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5);
      goto __pyx_L1_error;
      __pyx_L7_except_return:;
      __Pyx_XGIVEREF(__pyx_t_3);
      __Pyx_XGIVEREF(__pyx_t_4);
      __Pyx_XGIVEREF(__pyx_t_5);
      __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5);
      goto __pyx_L0;
      __pyx_L11_try_end:;
    }

    /* "View.MemoryView":384
 *
 *     cdef is_slice(self, obj):
 *         if not isinstance(obj, memoryview):             # <<<<<<<<<<<<<<
 *             try:
 *                 obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS,
 */
  }

  /* "View.MemoryView":391
 *                 return None
 *
 *         return obj             # <<<<<<<<<<<<<<
 *
 *     cdef setitem_slice_assignment(self, dst, src):
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF(__pyx_v_obj);
  __pyx_r = __pyx_v_obj;
  goto __pyx_L0;

  /* "View.MemoryView":383
 *             self.setitem_indexed(index, value)
 *
 *     cdef is_slice(self, obj):             # <<<<<<<<<<<<<<
 *         if not isinstance(obj, memoryview):
 *             try:
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
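/* NOTE: is_slice normalizes the assigned value: anything that is not
 * already a memoryview is wrapped in one, requesting PyBUF_ANY_CONTIGUOUS
 * on top of self.flags; objects without buffer support raise TypeError,
 * which is swallowed and surfaced as a None return so the caller can fall
 * back to scalar assignment. (Explanatory comment, not part of Cython's
 * generated output.) */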
/* "View.MemoryView":393
 *         return obj
 *
 *     cdef setitem_slice_assignment(self, dst, src):             # <<<<<<<<<<<<<<
 *         cdef __Pyx_memviewslice dst_slice
 *         cdef __Pyx_memviewslice src_slice
 */

static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src) {
  __Pyx_memviewslice __pyx_v_dst_slice;
  __Pyx_memviewslice __pyx_v_src_slice;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  int __pyx_t_3;
  int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("setitem_slice_assignment", 0);

  /* "View.MemoryView":397
 *         cdef __Pyx_memviewslice src_slice
 *
 *         memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0],             # <<<<<<<<<<<<<<
 *                                  get_slice_from_memview(dst, &dst_slice)[0],
 *                                  src.ndim, dst.ndim, self.dtype_is_object)
 */
  if (!(likely(((__pyx_v_src) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_src, __pyx_memoryview_type))))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 397; __pyx_clineno = __LINE__; goto __pyx_L1_error;}

  /* "View.MemoryView":398
 *
 *         memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0],
 *                                  get_slice_from_memview(dst, &dst_slice)[0],             # <<<<<<<<<<<<<<
 *                                  src.ndim, dst.ndim, self.dtype_is_object)
 *
 */
  if (!(likely(((__pyx_v_dst) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_dst, __pyx_memoryview_type))))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 398; __pyx_clineno = __LINE__; goto __pyx_L1_error;}

  /* "View.MemoryView":399
 *         memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0],
 *                                  get_slice_from_memview(dst, &dst_slice)[0],
 *                                  src.ndim, dst.ndim, self.dtype_is_object)             # <<<<<<<<<<<<<<
 *
 *     cdef setitem_slice_assign_scalar(self, memoryview dst, value):
 */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_src, __pyx_n_s_ndim); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 399; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyInt_As_int(__pyx_t_1); if (unlikely((__pyx_t_2 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 399; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_dst, __pyx_n_s_ndim); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 399; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_3 = __Pyx_PyInt_As_int(__pyx_t_1); if (unlikely((__pyx_t_3 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 399; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;

  /* "View.MemoryView":397
 *         cdef __Pyx_memviewslice src_slice
 *
 *         memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0],             # <<<<<<<<<<<<<<
 *                                  get_slice_from_memview(dst, &dst_slice)[0],
 *                                  src.ndim, dst.ndim, self.dtype_is_object)
 */
  __pyx_t_4 = __pyx_memoryview_copy_contents((__pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_src), (&__pyx_v_src_slice))[0]), (__pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_dst), (&__pyx_v_dst_slice))[0]), __pyx_t_2, __pyx_t_3, __pyx_v_self->dtype_is_object); if (unlikely(__pyx_t_4 == -1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 397; __pyx_clineno = __LINE__; goto __pyx_L1_error;}

  /* "View.MemoryView":393
 *         return obj
 *
 *     cdef setitem_slice_assignment(self, dst, src):             # <<<<<<<<<<<<<<
 *         cdef __Pyx_memviewslice dst_slice
 *         cdef __Pyx_memviewslice src_slice
 */

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assignment", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* "View.MemoryView":401
 *                                  src.ndim, dst.ndim, self.dtype_is_object)
 *
 *     cdef setitem_slice_assign_scalar(self, memoryview dst, value):             # <<<<<<<<<<<<<<
 *         cdef int array[128]
 *         cdef void *tmp = NULL
 */

static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value) {
  int __pyx_v_array[128];
  void *__pyx_v_tmp;
  void *__pyx_v_item;
  __Pyx_memviewslice *__pyx_v_dst_slice;
  __Pyx_memviewslice __pyx_v_tmp_slice;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_t_3;
  int __pyx_t_4;
  char const *__pyx_t_5;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  PyObject *__pyx_t_10 = NULL;
  PyObject *__pyx_t_11 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("setitem_slice_assign_scalar", 0);

  /* "View.MemoryView":403
 *     cdef setitem_slice_assign_scalar(self, memoryview dst, value):
 *         cdef int array[128]
 *         cdef void *tmp = NULL             # <<<<<<<<<<<<<<
 *         cdef void *item
 *
 */
  __pyx_v_tmp = NULL;

  /* "View.MemoryView":408
 *         cdef __Pyx_memviewslice *dst_slice
 *         cdef __Pyx_memviewslice tmp_slice
 *         dst_slice = get_slice_from_memview(dst, &tmp_slice)             # <<<<<<<<<<<<<<
 *
 *         if <size_t>self.view.itemsize > sizeof(array):
 */
  __pyx_v_dst_slice = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_dst, (&__pyx_v_tmp_slice));

  /* "View.MemoryView":410
 *         dst_slice = get_slice_from_memview(dst, &tmp_slice)
 *
 *         if <size_t>self.view.itemsize > sizeof(array):             # <<<<<<<<<<<<<<
 *             tmp = PyMem_Malloc(self.view.itemsize)
 *             if tmp == NULL:
 */
  __pyx_t_1 = ((((size_t)__pyx_v_self->view.itemsize) > (sizeof(__pyx_v_array))) != 0);
  if (__pyx_t_1) {

    /* "View.MemoryView":411
 *
 *         if <size_t>self.view.itemsize > sizeof(array):
 *             tmp = PyMem_Malloc(self.view.itemsize)             # <<<<<<<<<<<<<<
 *             if tmp == NULL:
 *                 raise MemoryError
 */
    __pyx_v_tmp = PyMem_Malloc(__pyx_v_self->view.itemsize);

    /* "View.MemoryView":412
 *         if <size_t>self.view.itemsize > sizeof(array):
 *             tmp = PyMem_Malloc(self.view.itemsize)
 *             if tmp == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError
 *             item = tmp
 */
    __pyx_t_1 = ((__pyx_v_tmp == NULL) != 0);
    if (__pyx_t_1) {

      /* "View.MemoryView":413
 *             tmp = PyMem_Malloc(self.view.itemsize)
 *             if tmp == NULL:
 *                 raise MemoryError             # <<<<<<<<<<<<<<
 *             item = tmp
 *         else:
 */
      PyErr_NoMemory(); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 413; __pyx_clineno = __LINE__; goto __pyx_L1_error;}

      /* "View.MemoryView":412
 *         if <size_t>self.view.itemsize > sizeof(array):
 *             tmp = PyMem_Malloc(self.view.itemsize)
 *             if tmp == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError
 *             item = tmp
 */
    }

    /* "View.MemoryView":414
 *             if tmp == NULL:
 *                 raise MemoryError
 *             item = tmp             # <<<<<<<<<<<<<<
 *         else:
 *             item = <void *> array
 */
    __pyx_v_item = __pyx_v_tmp;

    /* "View.MemoryView":410
 *         dst_slice = get_slice_from_memview(dst, &tmp_slice)
 *
 *         if <size_t>self.view.itemsize > sizeof(array):             # <<<<<<<<<<<<<<
 *             tmp = PyMem_Malloc(self.view.itemsize)
 *             if tmp == NULL:
 */
    goto __pyx_L3;
  }

  /* "View.MemoryView":416
 *             item = tmp
 *         else:
 *             item = <void *> array             # <<<<<<<<<<<<<<
 *
 *         try:
 */
  /*else*/ {
    __pyx_v_item = ((void *)__pyx_v_array);
  }
  __pyx_L3:;

  /* "View.MemoryView":418
 *             item = <void *> array
 *
 *         try:             # <<<<<<<<<<<<<<
 *             if self.dtype_is_object:
 *                 (<PyObject **> item)[0] = <PyObject *> value
 */
  /*try:*/ {

    /* "View.MemoryView":419
 *
 *         try:
 *             if self.dtype_is_object:             # <<<<<<<<<<<<<<
 *                 (<PyObject **> item)[0] = <PyObject *> value
 *             else:
 */
    __pyx_t_1 = (__pyx_v_self->dtype_is_object != 0);
    if (__pyx_t_1) {

      /* "View.MemoryView":420
 *         try:
 *             if self.dtype_is_object:
 *                 (<PyObject **> item)[0] = <PyObject *> value             # <<<<<<<<<<<<<<
 *             else:
 *                 self.assign_item_from_object(<char *> item, value)
 */
      (((PyObject **)__pyx_v_item)[0]) = ((PyObject *)__pyx_v_value);

      /* "View.MemoryView":419
 *
 *         try:
 *             if self.dtype_is_object:             # <<<<<<<<<<<<<<
 *                 (<PyObject **> item)[0] = <PyObject *> value
 *             else:
 */
      goto __pyx_L8;
    }
    /* "View.MemoryView":422
 *                 (<PyObject **> item)[0] = <PyObject *> value
 *             else:
 *                 self.assign_item_from_object(<char *> item, value)             # <<<<<<<<<<<<<<
 *
 *
 */
    /*else*/ {
      __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, ((char *)__pyx_v_item), __pyx_v_value); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 422; __pyx_clineno = __LINE__; goto __pyx_L6_error;}
      __Pyx_GOTREF(__pyx_t_2);
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    }
    __pyx_L8:;

    /* "View.MemoryView":426
 *
 *
 *             if self.view.suboffsets != NULL:             # <<<<<<<<<<<<<<
 *                 assert_direct_dimensions(self.view.suboffsets, self.view.ndim)
 *             slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize,
 */
    __pyx_t_1 = ((__pyx_v_self->view.suboffsets != NULL) != 0);
    if (__pyx_t_1) {

      /* "View.MemoryView":427
 *
 *             if self.view.suboffsets != NULL:
 *                 assert_direct_dimensions(self.view.suboffsets, self.view.ndim)             # <<<<<<<<<<<<<<
 *             slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize,
 *                                 item, self.dtype_is_object)
 */
      __pyx_t_2 = assert_direct_dimensions(__pyx_v_self->view.suboffsets, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 427; __pyx_clineno = __LINE__; goto __pyx_L6_error;}
      __Pyx_GOTREF(__pyx_t_2);
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;

      /* "View.MemoryView":426
 *
 *
 *             if self.view.suboffsets != NULL:             # <<<<<<<<<<<<<<
 *                 assert_direct_dimensions(self.view.suboffsets, self.view.ndim)
 *             slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize,
 */
    }

    /* "View.MemoryView":428
 *             if self.view.suboffsets != NULL:
 *                 assert_direct_dimensions(self.view.suboffsets, self.view.ndim)
 *             slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize,             # <<<<<<<<<<<<<<
 *                                 item, self.dtype_is_object)
 *         finally:
 */
    __pyx_memoryview_slice_assign_scalar(__pyx_v_dst_slice, __pyx_v_dst->view.ndim, __pyx_v_self->view.itemsize, __pyx_v_item, __pyx_v_self->dtype_is_object);
  }

  /* "View.MemoryView":431
 *                                 item, self.dtype_is_object)
 *         finally:
 *             PyMem_Free(tmp)             # <<<<<<<<<<<<<<
 *
 *     cdef setitem_indexed(self, index, value):
 */
  /*finally:*/ {
    /*normal exit:*/{
      PyMem_Free(__pyx_v_tmp);
      goto __pyx_L7;
    }
    /*exception exit:*/{
      __pyx_L6_error:;
      __pyx_t_6 = 0; __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0;
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (PY_MAJOR_VERSION >= 3) __Pyx_ExceptionSwap(&__pyx_t_9, &__pyx_t_10, &__pyx_t_11);
      if ((PY_MAJOR_VERSION < 3) || unlikely(__Pyx_GetException(&__pyx_t_6, &__pyx_t_7, &__pyx_t_8) < 0)) __Pyx_ErrFetch(&__pyx_t_6, &__pyx_t_7, &__pyx_t_8);
      __Pyx_XGOTREF(__pyx_t_6);
      __Pyx_XGOTREF(__pyx_t_7);
      __Pyx_XGOTREF(__pyx_t_8);
      __Pyx_XGOTREF(__pyx_t_9);
      __Pyx_XGOTREF(__pyx_t_10);
      __Pyx_XGOTREF(__pyx_t_11);
      __pyx_t_3 = __pyx_lineno; __pyx_t_4 = __pyx_clineno; __pyx_t_5 = __pyx_filename;
      {
        PyMem_Free(__pyx_v_tmp);
      }
      if (PY_MAJOR_VERSION >= 3) {
        __Pyx_XGIVEREF(__pyx_t_9);
        __Pyx_XGIVEREF(__pyx_t_10);
        __Pyx_XGIVEREF(__pyx_t_11);
        __Pyx_ExceptionReset(__pyx_t_9, __pyx_t_10, __pyx_t_11);
      }
      __Pyx_XGIVEREF(__pyx_t_6);
      __Pyx_XGIVEREF(__pyx_t_7);
      __Pyx_XGIVEREF(__pyx_t_8);
      __Pyx_ErrRestore(__pyx_t_6, __pyx_t_7, __pyx_t_8);
      __pyx_t_6 = 0; __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0;
      __pyx_lineno = __pyx_t_3; __pyx_clineno = __pyx_t_4; __pyx_filename = __pyx_t_5;
      goto __pyx_L1_error;
    }
    __pyx_L7:;
  }

  /* "View.MemoryView":401
 *                                  src.ndim, dst.ndim, self.dtype_is_object)
 *
 *     cdef setitem_slice_assign_scalar(self, memoryview dst, value):             # <<<<<<<<<<<<<<
 *         cdef int array[128]
 *         cdef void *tmp = NULL
 */

  /* function exit code */
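  /* NOTE: setitem_slice_assign_scalar stages the scalar in a temporary item
   * buffer: the int[128] stack array when the itemsize fits (512 bytes on
   * platforms where int is 4 bytes), else a PyMem_Malloc'ed block. The
   * finally clause above both frees that block and, on the error path,
   * saves and restores the in-flight exception around PyMem_Free so cleanup
   * cannot clobber it. (Explanatory comment, not part of Cython's generated
   * output.) */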
Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assign_scalar", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":433 * PyMem_Free(tmp) * * cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<< * cdef char *itemp = self.get_item_pointer(index) * self.assign_item_from_object(itemp, value) */ static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { char *__pyx_v_itemp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations char *__pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("setitem_indexed", 0); /* "View.MemoryView":434 * * cdef setitem_indexed(self, index, value): * cdef char *itemp = self.get_item_pointer(index) # <<<<<<<<<<<<<< * self.assign_item_from_object(itemp, value) * */ __pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_index); if (unlikely(__pyx_t_1 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 434; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_itemp = __pyx_t_1; /* "View.MemoryView":435 * cdef setitem_indexed(self, index, value): * cdef char *itemp = self.get_item_pointer(index) * self.assign_item_from_object(itemp, value) # <<<<<<<<<<<<<< * * cdef convert_item_to_object(self, char *itemp): */ __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 435; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":433 * PyMem_Free(tmp) * * cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<< * cdef char *itemp = self.get_item_pointer(index) * self.assign_item_from_object(itemp, value) */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_indexed", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":437 * self.assign_item_from_object(itemp, value) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp) { PyObject *__pyx_v_struct = NULL; PyObject *__pyx_v_bytesitem = 0; PyObject *__pyx_v_result = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; Py_ssize_t __pyx_t_8; PyObject *__pyx_t_9 = NULL; size_t __pyx_t_10; int __pyx_t_11; int __pyx_t_12; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("convert_item_to_object", 0); /* "View.MemoryView":440 * """Only used if instantiated manually by the user, or if Cython doesn't * know 
how to convert the type""" * import struct # <<<<<<<<<<<<<< * cdef bytes bytesitem * */ __pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, 0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 440; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_v_struct = __pyx_t_1; __pyx_t_1 = 0; /* "View.MemoryView":443 * cdef bytes bytesitem * * bytesitem = itemp[:self.view.itemsize] # <<<<<<<<<<<<<< * try: * result = struct.unpack(self.view.format, bytesitem) */ __pyx_t_1 = __Pyx_PyBytes_FromStringAndSize(__pyx_v_itemp + 0, __pyx_v_self->view.itemsize - 0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 443; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_v_bytesitem = ((PyObject*)__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":444 * * bytesitem = itemp[:self.view.itemsize] * try: # <<<<<<<<<<<<<< * result = struct.unpack(self.view.format, bytesitem) * except struct.error: */ { __Pyx_ExceptionSave(&__pyx_t_2, &__pyx_t_3, &__pyx_t_4); __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_3); __Pyx_XGOTREF(__pyx_t_4); /*try:*/ { /* "View.MemoryView":445 * bytesitem = itemp[:self.view.itemsize] * try: * result = struct.unpack(self.view.format, bytesitem) # <<<<<<<<<<<<<< * except struct.error: * raise ValueError("Unable to convert item to object") */ __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_unpack); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 445; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 445; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __Pyx_GOTREF(__pyx_t_6); __pyx_t_7 = NULL; __pyx_t_8 = 0; if (CYTHON_COMPILING_IN_CPYTHON && likely(PyMethod_Check(__pyx_t_5))) { __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_5); if (likely(__pyx_t_7)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); __Pyx_INCREF(__pyx_t_7); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_5, function); __pyx_t_8 = 1; } } __pyx_t_9 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 445; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __Pyx_GOTREF(__pyx_t_9); if (__pyx_t_7) { __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_7); __pyx_t_7 = NULL; } __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_9, 0+__pyx_t_8, __pyx_t_6); __Pyx_INCREF(__pyx_v_bytesitem); __Pyx_GIVEREF(__pyx_v_bytesitem); PyTuple_SET_ITEM(__pyx_t_9, 1+__pyx_t_8, __pyx_v_bytesitem); __pyx_t_6 = 0; __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_9, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 445; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_result = __pyx_t_1; __pyx_t_1 = 0; /* "View.MemoryView":444 * * bytesitem = itemp[:self.view.itemsize] * try: # <<<<<<<<<<<<<< * result = struct.unpack(self.view.format, bytesitem) * except struct.error: */ } /* "View.MemoryView":449 * raise ValueError("Unable to convert item to object") * else: * if len(self.view.format) == 1: # <<<<<<<<<<<<<< * return result[0] * return result */ /*else:*/ { __pyx_t_10 = strlen(__pyx_v_self->view.format); __pyx_t_11 = ((__pyx_t_10 == 1) != 0); if (__pyx_t_11) { /* "View.MemoryView":450 * else: * if len(self.view.format) == 1: * return result[0] # <<<<<<<<<<<<<< * return result 
* */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_result, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 0); if (unlikely(__pyx_t_1 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 450; __pyx_clineno = __LINE__; goto __pyx_L5_except_error;}; __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L6_except_return; /* "View.MemoryView":449 * raise ValueError("Unable to convert item to object") * else: * if len(self.view.format) == 1: # <<<<<<<<<<<<<< * return result[0] * return result */ } /* "View.MemoryView":451 * if len(self.view.format) == 1: * return result[0] * return result # <<<<<<<<<<<<<< * * cdef assign_item_from_object(self, char *itemp, object value): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_result); __pyx_r = __pyx_v_result; goto __pyx_L6_except_return; } __pyx_L3_error:; __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":446 * try: * result = struct.unpack(self.view.format, bytesitem) * except struct.error: # <<<<<<<<<<<<<< * raise ValueError("Unable to convert item to object") * else: */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_error); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 446; __pyx_clineno = __LINE__; goto __pyx_L5_except_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_12 = PyErr_ExceptionMatches(__pyx_t_1); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (__pyx_t_12) { __Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_1, &__pyx_t_5, &__pyx_t_9) < 0) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 446; __pyx_clineno = __LINE__; goto __pyx_L5_except_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_GOTREF(__pyx_t_5); __Pyx_GOTREF(__pyx_t_9); /* "View.MemoryView":447 * result = struct.unpack(self.view.format, bytesitem) * except struct.error: * raise ValueError("Unable to convert item to object") # <<<<<<<<<<<<<< * else: * if len(self.view.format) == 1: */ __pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__7, NULL); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 447; __pyx_clineno = __LINE__; goto __pyx_L5_except_error;} __Pyx_GOTREF(__pyx_t_6); __Pyx_Raise(__pyx_t_6, 0, 0, 0); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 447; __pyx_clineno = __LINE__; goto __pyx_L5_except_error;} } goto __pyx_L5_except_error; __pyx_L5_except_error:; /* "View.MemoryView":444 * * bytesitem = itemp[:self.view.itemsize] * try: # <<<<<<<<<<<<<< * result = struct.unpack(self.view.format, bytesitem) * except struct.error: */ __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); goto __pyx_L1_error; __pyx_L6_except_return:; __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); goto __pyx_L0; } /* "View.MemoryView":437 * self.assign_item_from_object(itemp, value) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_9); 
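/* Error epilogue for convert_item_to_object: every temporary acquired in
 * the try/except machinery above has been released; all that remains is
 * to record a traceback frame and return NULL. In Python terms the whole
 * method is roughly (an illustrative sketch, not generated code):
 *
 *     bytesitem = itemp[:self.view.itemsize]
 *     try:
 *         result = struct.unpack(self.view.format, bytesitem)
 *     except struct.error:
 *         raise ValueError("Unable to convert item to object")
 *     else:
 *         return result[0] if len(self.view.format) == 1 else result
 */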
__Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_struct); __Pyx_XDECREF(__pyx_v_bytesitem); __Pyx_XDECREF(__pyx_v_result); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":453 * return result * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) { PyObject *__pyx_v_struct = NULL; char __pyx_v_c; PyObject *__pyx_v_bytesvalue = 0; Py_ssize_t __pyx_v_i; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; Py_ssize_t __pyx_t_7; PyObject *__pyx_t_8 = NULL; PyObject *__pyx_t_9 = NULL; char *__pyx_t_10; char *__pyx_t_11; char *__pyx_t_12; char *__pyx_t_13; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("assign_item_from_object", 0); /* "View.MemoryView":456 * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" * import struct # <<<<<<<<<<<<<< * cdef char c * cdef bytes bytesvalue */ __pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, 0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 456; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_v_struct = __pyx_t_1; __pyx_t_1 = 0; /* "View.MemoryView":461 * cdef Py_ssize_t i * * if isinstance(value, tuple): # <<<<<<<<<<<<<< * bytesvalue = struct.pack(self.view.format, *value) * else: */ __pyx_t_2 = PyTuple_Check(__pyx_v_value); __pyx_t_3 = (__pyx_t_2 != 0); if (__pyx_t_3) { /* "View.MemoryView":462 * * if isinstance(value, tuple): * bytesvalue = struct.pack(self.view.format, *value) # <<<<<<<<<<<<<< * else: * bytesvalue = struct.pack(self.view.format, value) */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 462; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_4 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 462; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 462; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PySequence_Tuple(__pyx_v_value); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 462; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_6 = PyNumber_Add(__pyx_t_5, __pyx_t_4); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 462; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_6, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 462; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); 
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 462; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_bytesvalue = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; /* "View.MemoryView":461 * cdef Py_ssize_t i * * if isinstance(value, tuple): # <<<<<<<<<<<<<< * bytesvalue = struct.pack(self.view.format, *value) * else: */ goto __pyx_L3; } /* "View.MemoryView":464 * bytesvalue = struct.pack(self.view.format, *value) * else: * bytesvalue = struct.pack(self.view.format, value) # <<<<<<<<<<<<<< * * for i, c in enumerate(bytesvalue): */ /*else*/ { __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 464; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_6); __pyx_t_1 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 464; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = NULL; __pyx_t_7 = 0; if (CYTHON_COMPILING_IN_CPYTHON && likely(PyMethod_Check(__pyx_t_6))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_6); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_6, function); __pyx_t_7 = 1; } } __pyx_t_8 = PyTuple_New(2+__pyx_t_7); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 464; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_8); if (__pyx_t_5) { __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_5); __pyx_t_5 = NULL; } __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_8, 0+__pyx_t_7, __pyx_t_1); __Pyx_INCREF(__pyx_v_value); __Pyx_GIVEREF(__pyx_v_value); PyTuple_SET_ITEM(__pyx_t_8, 1+__pyx_t_7, __pyx_v_value); __pyx_t_1 = 0; __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_8, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 464; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 464; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_bytesvalue = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; } __pyx_L3:; /* "View.MemoryView":466 * bytesvalue = struct.pack(self.view.format, value) * * for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<< * itemp[i] = c * */ __pyx_t_7 = 0; if (unlikely(__pyx_v_bytesvalue == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' is not iterable"); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 466; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __Pyx_INCREF(__pyx_v_bytesvalue); __pyx_t_9 = __pyx_v_bytesvalue; __pyx_t_11 = PyBytes_AS_STRING(__pyx_t_9); __pyx_t_12 = (__pyx_t_11 + PyBytes_GET_SIZE(__pyx_t_9)); for (__pyx_t_13 = __pyx_t_11; __pyx_t_13 < __pyx_t_12; __pyx_t_13++) { __pyx_t_10 = __pyx_t_13; __pyx_v_c = (__pyx_t_10[0]); /* "View.MemoryView":467 * * for i, c in enumerate(bytesvalue): * itemp[i] = c # <<<<<<<<<<<<<< * * @cname('getbuffer') */ __pyx_v_i = __pyx_t_7; /* "View.MemoryView":466 * 
bytesvalue = struct.pack(self.view.format, value) * * for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<< * itemp[i] = c * */ __pyx_t_7 = (__pyx_t_7 + 1); /* "View.MemoryView":467 * * for i, c in enumerate(bytesvalue): * itemp[i] = c # <<<<<<<<<<<<<< * * @cname('getbuffer') */ (__pyx_v_itemp[__pyx_v_i]) = __pyx_v_c; } __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; /* "View.MemoryView":453 * return result * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_8); __Pyx_XDECREF(__pyx_t_9); __Pyx_AddTraceback("View.MemoryView.memoryview.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_struct); __Pyx_XDECREF(__pyx_v_bytesvalue); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":470 * * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< * if flags & PyBUF_STRIDES: * info.shape = self.view.shape */ /* Python wrapper */ static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; Py_ssize_t *__pyx_t_2; char *__pyx_t_3; void *__pyx_t_4; int __pyx_t_5; Py_ssize_t __pyx_t_6; __Pyx_RefNannySetupContext("__getbuffer__", 0); if (__pyx_v_info != NULL) { __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); __Pyx_GIVEREF(__pyx_v_info->obj); } /* "View.MemoryView":471 * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<< * info.shape = self.view.shape * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_STRIDES) != 0); if (__pyx_t_1) { /* "View.MemoryView":472 * def __getbuffer__(self, Py_buffer *info, int flags): * if flags & PyBUF_STRIDES: * info.shape = self.view.shape # <<<<<<<<<<<<<< * else: * info.shape = NULL */ __pyx_t_2 = __pyx_v_self->view.shape; __pyx_v_info->shape = __pyx_t_2; /* "View.MemoryView":471 * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<< * info.shape = self.view.shape * else: */ goto __pyx_L3; } /* "View.MemoryView":474 * info.shape = self.view.shape * else: * info.shape = NULL # <<<<<<<<<<<<<< * * if flags & PyBUF_STRIDES: */ /*else*/ { __pyx_v_info->shape = NULL; } __pyx_L3:; /* "View.MemoryView":476 * info.shape = NULL * * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<< * info.strides = self.view.strides * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_STRIDES) != 0); if (__pyx_t_1) { /* 
"View.MemoryView":477 * * if flags & PyBUF_STRIDES: * info.strides = self.view.strides # <<<<<<<<<<<<<< * else: * info.strides = NULL */ __pyx_t_2 = __pyx_v_self->view.strides; __pyx_v_info->strides = __pyx_t_2; /* "View.MemoryView":476 * info.shape = NULL * * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<< * info.strides = self.view.strides * else: */ goto __pyx_L4; } /* "View.MemoryView":479 * info.strides = self.view.strides * else: * info.strides = NULL # <<<<<<<<<<<<<< * * if flags & PyBUF_INDIRECT: */ /*else*/ { __pyx_v_info->strides = NULL; } __pyx_L4:; /* "View.MemoryView":481 * info.strides = NULL * * if flags & PyBUF_INDIRECT: # <<<<<<<<<<<<<< * info.suboffsets = self.view.suboffsets * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_INDIRECT) != 0); if (__pyx_t_1) { /* "View.MemoryView":482 * * if flags & PyBUF_INDIRECT: * info.suboffsets = self.view.suboffsets # <<<<<<<<<<<<<< * else: * info.suboffsets = NULL */ __pyx_t_2 = __pyx_v_self->view.suboffsets; __pyx_v_info->suboffsets = __pyx_t_2; /* "View.MemoryView":481 * info.strides = NULL * * if flags & PyBUF_INDIRECT: # <<<<<<<<<<<<<< * info.suboffsets = self.view.suboffsets * else: */ goto __pyx_L5; } /* "View.MemoryView":484 * info.suboffsets = self.view.suboffsets * else: * info.suboffsets = NULL # <<<<<<<<<<<<<< * * if flags & PyBUF_FORMAT: */ /*else*/ { __pyx_v_info->suboffsets = NULL; } __pyx_L5:; /* "View.MemoryView":486 * info.suboffsets = NULL * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * info.format = self.view.format * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); if (__pyx_t_1) { /* "View.MemoryView":487 * * if flags & PyBUF_FORMAT: * info.format = self.view.format # <<<<<<<<<<<<<< * else: * info.format = NULL */ __pyx_t_3 = __pyx_v_self->view.format; __pyx_v_info->format = __pyx_t_3; /* "View.MemoryView":486 * info.suboffsets = NULL * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * info.format = self.view.format * else: */ goto __pyx_L6; } /* "View.MemoryView":489 * info.format = self.view.format * else: * info.format = NULL # <<<<<<<<<<<<<< * * info.buf = self.view.buf */ /*else*/ { __pyx_v_info->format = NULL; } __pyx_L6:; /* "View.MemoryView":491 * info.format = NULL * * info.buf = self.view.buf # <<<<<<<<<<<<<< * info.ndim = self.view.ndim * info.itemsize = self.view.itemsize */ __pyx_t_4 = __pyx_v_self->view.buf; __pyx_v_info->buf = __pyx_t_4; /* "View.MemoryView":492 * * info.buf = self.view.buf * info.ndim = self.view.ndim # <<<<<<<<<<<<<< * info.itemsize = self.view.itemsize * info.len = self.view.len */ __pyx_t_5 = __pyx_v_self->view.ndim; __pyx_v_info->ndim = __pyx_t_5; /* "View.MemoryView":493 * info.buf = self.view.buf * info.ndim = self.view.ndim * info.itemsize = self.view.itemsize # <<<<<<<<<<<<<< * info.len = self.view.len * info.readonly = 0 */ __pyx_t_6 = __pyx_v_self->view.itemsize; __pyx_v_info->itemsize = __pyx_t_6; /* "View.MemoryView":494 * info.ndim = self.view.ndim * info.itemsize = self.view.itemsize * info.len = self.view.len # <<<<<<<<<<<<<< * info.readonly = 0 * info.obj = self */ __pyx_t_6 = __pyx_v_self->view.len; __pyx_v_info->len = __pyx_t_6; /* "View.MemoryView":495 * info.itemsize = self.view.itemsize * info.len = self.view.len * info.readonly = 0 # <<<<<<<<<<<<<< * info.obj = self * */ __pyx_v_info->readonly = 0; /* "View.MemoryView":496 * info.len = self.view.len * info.readonly = 0 * info.obj = self # <<<<<<<<<<<<<< * * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") */ __Pyx_INCREF(((PyObject *)__pyx_v_self)); 
__Pyx_GIVEREF(((PyObject *)__pyx_v_self)); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = ((PyObject *)__pyx_v_self); /* "View.MemoryView":470 * * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< * if flags & PyBUF_STRIDES: * info.shape = self.view.shape */ /* function exit code */ __pyx_r = 0; if (__pyx_v_info != NULL && __pyx_v_info->obj == Py_None) { __Pyx_GOTREF(Py_None); __Pyx_DECREF(Py_None); __pyx_v_info->obj = NULL; } __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":503 * property T: * @cname('__pyx_memoryview_transpose') * def __get__(self): # <<<<<<<<<<<<<< * cdef _memoryviewslice result = memoryview_copy(self) * transpose_memslice(&result.from_slice) */ /* Python wrapper */ static PyObject *__pyx_memoryview_transpose(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview_transpose(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self) { struct __pyx_memoryviewslice_obj *__pyx_v_result = 0; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":504 * @cname('__pyx_memoryview_transpose') * def __get__(self): * cdef _memoryviewslice result = memoryview_copy(self) # <<<<<<<<<<<<<< * transpose_memslice(&result.from_slice) * return result */ __pyx_t_1 = __pyx_memoryview_copy_object(__pyx_v_self); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 504; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_memoryviewslice_type))))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 504; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":505 * def __get__(self): * cdef _memoryviewslice result = memoryview_copy(self) * transpose_memslice(&result.from_slice) # <<<<<<<<<<<<<< * return result * */ __pyx_t_2 = __pyx_memslice_transpose((&__pyx_v_result->from_slice)); if (unlikely(__pyx_t_2 == 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 505; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "View.MemoryView":506 * cdef _memoryviewslice result = memoryview_copy(self) * transpose_memslice(&result.from_slice) * return result # <<<<<<<<<<<<<< * * property base: */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = ((PyObject *)__pyx_v_result); goto __pyx_L0; /* "View.MemoryView":503 * property T: * @cname('__pyx_memoryview_transpose') * def __get__(self): # <<<<<<<<<<<<<< * cdef _memoryviewslice result = memoryview_copy(self) * transpose_memslice(&result.from_slice) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.T.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* 
"View.MemoryView":510 * property base: * @cname('__pyx_memoryview__get__base') * def __get__(self): # <<<<<<<<<<<<<< * return self.obj * */ /* Python wrapper */ static PyObject *__pyx_memoryview__get__base(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview__get__base(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":511 * @cname('__pyx_memoryview__get__base') * def __get__(self): * return self.obj # <<<<<<<<<<<<<< * * property shape: */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_self->obj); __pyx_r = __pyx_v_self->obj; goto __pyx_L0; /* "View.MemoryView":510 * property base: * @cname('__pyx_memoryview__get__base') * def __get__(self): # <<<<<<<<<<<<<< * return self.obj * */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":515 * property shape: * @cname('__pyx_memoryview_get_shape') * def __get__(self): # <<<<<<<<<<<<<< * return tuple([length for length in self.view.shape[:self.view.ndim]]) * */ /* Python wrapper */ static PyObject *__pyx_memoryview_get_shape(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview_get_shape(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self) { Py_ssize_t __pyx_v_length; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; Py_ssize_t *__pyx_t_2; Py_ssize_t *__pyx_t_3; Py_ssize_t *__pyx_t_4; PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":516 * @cname('__pyx_memoryview_get_shape') * def __get__(self): * return tuple([length for length in self.view.shape[:self.view.ndim]]) # <<<<<<<<<<<<<< * * property strides: */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 516; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim); for (__pyx_t_4 = __pyx_v_self->view.shape; __pyx_t_4 < __pyx_t_3; __pyx_t_4++) { __pyx_t_2 = __pyx_t_4; __pyx_v_length = (__pyx_t_2[0]); __pyx_t_5 = PyInt_FromSsize_t(__pyx_v_length); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 516; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); if (unlikely(__Pyx_ListComp_Append(__pyx_t_1, (PyObject*)__pyx_t_5))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 516; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; } __pyx_t_5 = PyList_AsTuple(((PyObject*)__pyx_t_1)); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno 
= 516; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "View.MemoryView":515 * property shape: * @cname('__pyx_memoryview_get_shape') * def __get__(self): # <<<<<<<<<<<<<< * return tuple([length for length in self.view.shape[:self.view.ndim]]) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.memoryview.shape.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":520 * property strides: * @cname('__pyx_memoryview_get_strides') * def __get__(self): # <<<<<<<<<<<<<< * if self.view.strides == NULL: * */ /* Python wrapper */ static PyObject *__pyx_memoryview_get_strides(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview_get_strides(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self) { Py_ssize_t __pyx_v_stride; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; Py_ssize_t *__pyx_t_3; Py_ssize_t *__pyx_t_4; Py_ssize_t *__pyx_t_5; PyObject *__pyx_t_6 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":521 * @cname('__pyx_memoryview_get_strides') * def __get__(self): * if self.view.strides == NULL: # <<<<<<<<<<<<<< * * raise ValueError("Buffer view does not expose strides") */ __pyx_t_1 = ((__pyx_v_self->view.strides == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":523 * if self.view.strides == NULL: * * raise ValueError("Buffer view does not expose strides") # <<<<<<<<<<<<<< * * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) */ __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__8, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 523; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 523; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "View.MemoryView":521 * @cname('__pyx_memoryview_get_strides') * def __get__(self): * if self.view.strides == NULL: # <<<<<<<<<<<<<< * * raise ValueError("Buffer view does not expose strides") */ } /* "View.MemoryView":525 * raise ValueError("Buffer view does not expose strides") * * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) # <<<<<<<<<<<<<< * * property suboffsets: */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 525; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = (__pyx_v_self->view.strides + __pyx_v_self->view.ndim); for (__pyx_t_5 = __pyx_v_self->view.strides; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) { __pyx_t_3 = __pyx_t_5; __pyx_v_stride = (__pyx_t_3[0]); __pyx_t_6 = PyInt_FromSsize_t(__pyx_v_stride); if (unlikely(!__pyx_t_6)) {__pyx_filename 
= __pyx_f[1]; __pyx_lineno = 525; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_6); if (unlikely(__Pyx_ListComp_Append(__pyx_t_2, (PyObject*)__pyx_t_6))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 525; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } __pyx_t_6 = PyList_AsTuple(((PyObject*)__pyx_t_2)); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 525; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_6; __pyx_t_6 = 0; goto __pyx_L0; /* "View.MemoryView":520 * property strides: * @cname('__pyx_memoryview_get_strides') * def __get__(self): # <<<<<<<<<<<<<< * if self.view.strides == NULL: * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("View.MemoryView.memoryview.strides.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":529 * property suboffsets: * @cname('__pyx_memoryview_get_suboffsets') * def __get__(self): # <<<<<<<<<<<<<< * if self.view.suboffsets == NULL: * return (-1,) * self.view.ndim */ /* Python wrapper */ static PyObject *__pyx_memoryview_get_suboffsets(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview_get_suboffsets(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self) { Py_ssize_t __pyx_v_suboffset; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; Py_ssize_t *__pyx_t_4; Py_ssize_t *__pyx_t_5; Py_ssize_t *__pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":530 * @cname('__pyx_memoryview_get_suboffsets') * def __get__(self): * if self.view.suboffsets == NULL: # <<<<<<<<<<<<<< * return (-1,) * self.view.ndim * */ __pyx_t_1 = ((__pyx_v_self->view.suboffsets == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":531 * def __get__(self): * if self.view.suboffsets == NULL: * return (-1,) * self.view.ndim # <<<<<<<<<<<<<< * * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 531; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyNumber_Multiply(__pyx_tuple__9, __pyx_t_2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 531; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":530 * @cname('__pyx_memoryview_get_suboffsets') * def __get__(self): * if self.view.suboffsets == NULL: # <<<<<<<<<<<<<< * return (-1,) * self.view.ndim * */ } /* "View.MemoryView":533 * return (-1,) * self.view.ndim * * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) 
# <<<<<<<<<<<<<< * * property ndim: */ __Pyx_XDECREF(__pyx_r); __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 533; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = (__pyx_v_self->view.suboffsets + __pyx_v_self->view.ndim); for (__pyx_t_6 = __pyx_v_self->view.suboffsets; __pyx_t_6 < __pyx_t_5; __pyx_t_6++) { __pyx_t_4 = __pyx_t_6; __pyx_v_suboffset = (__pyx_t_4[0]); __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_suboffset); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 533; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (unlikely(__Pyx_ListComp_Append(__pyx_t_3, (PyObject*)__pyx_t_2))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 533; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } __pyx_t_2 = PyList_AsTuple(((PyObject*)__pyx_t_3)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 533; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":529 * property suboffsets: * @cname('__pyx_memoryview_get_suboffsets') * def __get__(self): # <<<<<<<<<<<<<< * if self.view.suboffsets == NULL: * return (-1,) * self.view.ndim */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.suboffsets.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":537 * property ndim: * @cname('__pyx_memoryview_get_ndim') * def __get__(self): # <<<<<<<<<<<<<< * return self.view.ndim * */ /* Python wrapper */ static PyObject *__pyx_memoryview_get_ndim(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview_get_ndim(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":538 * @cname('__pyx_memoryview_get_ndim') * def __get__(self): * return self.view.ndim # <<<<<<<<<<<<<< * * property itemsize: */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 538; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":537 * property ndim: * @cname('__pyx_memoryview_get_ndim') * def __get__(self): # <<<<<<<<<<<<<< * return self.view.ndim * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.ndim.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":542 * property itemsize: * @cname('__pyx_memoryview_get_itemsize') * def 
__get__(self): # <<<<<<<<<<<<<< * return self.view.itemsize * */ /* Python wrapper */ static PyObject *__pyx_memoryview_get_itemsize(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview_get_itemsize(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":543 * @cname('__pyx_memoryview_get_itemsize') * def __get__(self): * return self.view.itemsize # <<<<<<<<<<<<<< * * property nbytes: */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 543; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":542 * property itemsize: * @cname('__pyx_memoryview_get_itemsize') * def __get__(self): # <<<<<<<<<<<<<< * return self.view.itemsize * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.itemsize.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":547 * property nbytes: * @cname('__pyx_memoryview_get_nbytes') * def __get__(self): # <<<<<<<<<<<<<< * return self.size * self.view.itemsize * */ /* Python wrapper */ static PyObject *__pyx_memoryview_get_nbytes(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview_get_nbytes(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":548 * @cname('__pyx_memoryview_get_nbytes') * def __get__(self): * return self.size * self.view.itemsize # <<<<<<<<<<<<<< * * property size: */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_size); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 548; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 548; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyNumber_Multiply(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 548; __pyx_clineno = __LINE__; goto 
__pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":547 * property nbytes: * @cname('__pyx_memoryview_get_nbytes') * def __get__(self): # <<<<<<<<<<<<<< * return self.size * self.view.itemsize * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.nbytes.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":552 * property size: * @cname('__pyx_memoryview_get_size') * def __get__(self): # <<<<<<<<<<<<<< * if self._size is None: * result = 1 */ /* Python wrapper */ static PyObject *__pyx_memoryview_get_size(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview_get_size(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_v_result = NULL; PyObject *__pyx_v_length = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; Py_ssize_t *__pyx_t_3; Py_ssize_t *__pyx_t_4; Py_ssize_t *__pyx_t_5; PyObject *__pyx_t_6 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":553 * @cname('__pyx_memoryview_get_size') * def __get__(self): * if self._size is None: # <<<<<<<<<<<<<< * result = 1 * */ __pyx_t_1 = (__pyx_v_self->_size == Py_None); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":554 * def __get__(self): * if self._size is None: * result = 1 # <<<<<<<<<<<<<< * * for length in self.view.shape[:self.view.ndim]: */ __Pyx_INCREF(__pyx_int_1); __pyx_v_result = __pyx_int_1; /* "View.MemoryView":556 * result = 1 * * for length in self.view.shape[:self.view.ndim]: # <<<<<<<<<<<<<< * result *= length * */ __pyx_t_4 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim); for (__pyx_t_5 = __pyx_v_self->view.shape; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) { __pyx_t_3 = __pyx_t_5; __pyx_t_6 = PyInt_FromSsize_t((__pyx_t_3[0])); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 556; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_6); __Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_6); __pyx_t_6 = 0; /* "View.MemoryView":557 * * for length in self.view.shape[:self.view.ndim]: * result *= length # <<<<<<<<<<<<<< * * self._size = result */ __pyx_t_6 = PyNumber_InPlaceMultiply(__pyx_v_result, __pyx_v_length); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 557; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF_SET(__pyx_v_result, __pyx_t_6); __pyx_t_6 = 0; } /* "View.MemoryView":559 * result *= length * * self._size = result # <<<<<<<<<<<<<< * * return self._size */ __Pyx_INCREF(__pyx_v_result); __Pyx_GIVEREF(__pyx_v_result); __Pyx_GOTREF(__pyx_v_self->_size); __Pyx_DECREF(__pyx_v_self->_size); __pyx_v_self->_size = __pyx_v_result; /* "View.MemoryView":553 * @cname('__pyx_memoryview_get_size') * def 
__get__(self): * if self._size is None: # <<<<<<<<<<<<<< * result = 1 * */ } /* "View.MemoryView":561 * self._size = result * * return self._size # <<<<<<<<<<<<<< * * def __len__(self): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_self->_size); __pyx_r = __pyx_v_self->_size; goto __pyx_L0; /* "View.MemoryView":552 * property size: * @cname('__pyx_memoryview_get_size') * def __get__(self): # <<<<<<<<<<<<<< * if self._size is None: * result = 1 */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("View.MemoryView.memoryview.size.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_result); __Pyx_XDECREF(__pyx_v_length); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":563 * return self._size * * def __len__(self): # <<<<<<<<<<<<<< * if self.view.ndim >= 1: * return self.view.shape[0] */ /* Python wrapper */ static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self); /*proto*/ static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self) { Py_ssize_t __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__len__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self) { Py_ssize_t __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("__len__", 0); /* "View.MemoryView":564 * * def __len__(self): * if self.view.ndim >= 1: # <<<<<<<<<<<<<< * return self.view.shape[0] * */ __pyx_t_1 = ((__pyx_v_self->view.ndim >= 1) != 0); if (__pyx_t_1) { /* "View.MemoryView":565 * def __len__(self): * if self.view.ndim >= 1: * return self.view.shape[0] # <<<<<<<<<<<<<< * * return 0 */ __pyx_r = (__pyx_v_self->view.shape[0]); goto __pyx_L0; /* "View.MemoryView":564 * * def __len__(self): * if self.view.ndim >= 1: # <<<<<<<<<<<<<< * return self.view.shape[0] * */ } /* "View.MemoryView":567 * return self.view.shape[0] * * return 0 # <<<<<<<<<<<<<< * * def __repr__(self): */ __pyx_r = 0; goto __pyx_L0; /* "View.MemoryView":563 * return self._size * * def __len__(self): # <<<<<<<<<<<<<< * if self.view.ndim >= 1: * return self.view.shape[0] */ /* function exit code */ __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":569 * return 0 * * def __repr__(self): # <<<<<<<<<<<<<< * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, * id(self)) */ /* Python wrapper */ static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__repr__", 0); /* 
"View.MemoryView":570 * * def __repr__(self): * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, # <<<<<<<<<<<<<< * id(self)) * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 570; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 570; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 570; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":571 * def __repr__(self): * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, * id(self)) # <<<<<<<<<<<<<< * * def __str__(self): */ __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 571; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_v_self)); __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_id, __pyx_t_2, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 571; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":570 * * def __repr__(self): * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, # <<<<<<<<<<<<<< * id(self)) * */ __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 570; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_3); __pyx_t_1 = 0; __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_t_2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 570; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":569 * return 0 * * def __repr__(self): # <<<<<<<<<<<<<< * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, * id(self)) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":573 * id(self)) * * def __str__(self): # <<<<<<<<<<<<<< * return "<MemoryView of %r object>" % (self.base.__class__.__name__,) * */ /* Python wrapper */ static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__str__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code 
*/ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__str__", 0); /* "View.MemoryView":574 * * def __str__(self): * return "<MemoryView of %r object>" % (self.base.__class__.__name__,) # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 574; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 574; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 574; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 574; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_object, __pyx_t_2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 574; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":573 * id(self)) * * def __str__(self): # <<<<<<<<<<<<<< * return "<MemoryView of %r object>" % (self.base.__class__.__name__,) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.__str__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":577 * * * def is_c_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* Python wrapper */ static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("is_c_contig (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice *__pyx_v_mslice; __Pyx_memviewslice __pyx_v_tmp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("is_c_contig", 0); /* "View.MemoryView":580 * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp * 
mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<< * return slice_is_contig(mslice, 'C', self.view.ndim) * */ __pyx_v_mslice = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp)); /* "View.MemoryView":581 * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) * return slice_is_contig(mslice, 'C', self.view.ndim) # <<<<<<<<<<<<<< * * def is_f_contig(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig(__pyx_v_mslice, 'C', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 581; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":577 * * * def is_c_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.is_c_contig", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":583 * return slice_is_contig(mslice, 'C', self.view.ndim) * * def is_f_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* Python wrapper */ static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("is_f_contig (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice *__pyx_v_mslice; __Pyx_memviewslice __pyx_v_tmp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("is_f_contig", 0); /* "View.MemoryView":586 * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<< * return slice_is_contig(mslice, 'F', self.view.ndim) * */ __pyx_v_mslice = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp)); /* "View.MemoryView":587 * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) * return slice_is_contig(mslice, 'F', self.view.ndim) # <<<<<<<<<<<<<< * * def copy(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig(__pyx_v_mslice, 'F', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 587; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":583 * return slice_is_contig(mslice, 'C', self.view.ndim) * * def is_f_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.is_f_contig", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; 
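/* Shared exit convention for the generated methods in this file: on failure
 * control jumps to __pyx_L1_error, which releases any live temporaries,
 * records a traceback frame via __Pyx_AddTraceback and NULLs the result,
 * while __pyx_L0 hands the owned reference back to the caller and closes
 * the RefNanny context.  is_c_contig/is_f_contig here are thin wrappers:
 * each materializes the underlying __Pyx_memviewslice via
 * get_slice_from_memview and delegates to __pyx_memviewslice_is_contig
 * with order 'C' or 'F' respectively. */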
__Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":589 * return slice_is_contig(mslice, 'F', self.view.ndim) * * def copy(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice mslice * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS */ /* Python wrapper */ static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("copy (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice __pyx_v_mslice; int __pyx_v_flags; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_memviewslice __pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("copy", 0); /* "View.MemoryView":591 * def copy(self): * cdef __Pyx_memviewslice mslice * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS # <<<<<<<<<<<<<< * * slice_copy(self, &mslice) */ __pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_F_CONTIGUOUS)); /* "View.MemoryView":593 * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS * * slice_copy(self, &mslice) # <<<<<<<<<<<<<< * mslice = slice_copy_contig(&mslice, "c", self.view.ndim, * self.view.itemsize, */ __pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_mslice)); /* "View.MemoryView":594 * * slice_copy(self, &mslice) * mslice = slice_copy_contig(&mslice, "c", self.view.ndim, # <<<<<<<<<<<<<< * self.view.itemsize, * flags|PyBUF_C_CONTIGUOUS, */ __pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_mslice), __pyx_k_c, __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_C_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 594; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_mslice = __pyx_t_1; /* "View.MemoryView":599 * self.dtype_is_object) * * return memoryview_copy_from_slice(self, &mslice) # <<<<<<<<<<<<<< * * def copy_fortran(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_mslice)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 599; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":589 * return slice_is_contig(mslice, 'F', self.view.ndim) * * def copy(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice mslice * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.copy", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":601 * return memoryview_copy_from_slice(self, &mslice) * * def copy_fortran(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice src, dst * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS */ /* Python wrapper */ static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static 
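/* copy() above and copy_fortran() below follow the same recipe: clear the
 * opposing contiguity bit from self.flags, let
 * __pyx_memoryview_copy_new_contig allocate a fresh C- or Fortran-ordered
 * buffer of the same shape, then rewrap the result through
 * memoryview_copy_from_slice. */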
PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("copy_fortran (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice __pyx_v_src; __Pyx_memviewslice __pyx_v_dst; int __pyx_v_flags; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_memviewslice __pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("copy_fortran", 0); /* "View.MemoryView":603 * def copy_fortran(self): * cdef __Pyx_memviewslice src, dst * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS # <<<<<<<<<<<<<< * * slice_copy(self, &src) */ __pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_C_CONTIGUOUS)); /* "View.MemoryView":605 * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS * * slice_copy(self, &src) # <<<<<<<<<<<<<< * dst = slice_copy_contig(&src, "fortran", self.view.ndim, * self.view.itemsize, */ __pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_src)); /* "View.MemoryView":606 * * slice_copy(self, &src) * dst = slice_copy_contig(&src, "fortran", self.view.ndim, # <<<<<<<<<<<<<< * self.view.itemsize, * flags|PyBUF_F_CONTIGUOUS, */ __pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_src), __pyx_k_fortran, __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_F_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 606; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_dst = __pyx_t_1; /* "View.MemoryView":611 * self.dtype_is_object) * * return memoryview_copy_from_slice(self, &dst) # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_dst)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 611; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":601 * return memoryview_copy_from_slice(self, &mslice) * * def copy_fortran(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice src, dst * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.copy_fortran", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":615 * * @cname('__pyx_memoryview_new') * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<< * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo */ static PyObject *__pyx_memoryview_new(PyObject *__pyx_v_o, int __pyx_v_flags, int __pyx_v_dtype_is_object, __Pyx_TypeInfo *__pyx_v_typeinfo) { struct __pyx_memoryview_obj *__pyx_v_result = 0; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; 
__Pyx_RefNannySetupContext("memoryview_cwrapper", 0); /* "View.MemoryView":616 * @cname('__pyx_memoryview_new') * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): * cdef memoryview result = memoryview(o, flags, dtype_is_object) # <<<<<<<<<<<<<< * result.typeinfo = typeinfo * return result */ __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 616; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 616; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 616; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(__pyx_v_o); __Pyx_GIVEREF(__pyx_v_o); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_o); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 616; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result = ((struct __pyx_memoryview_obj *)__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":617 * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo # <<<<<<<<<<<<<< * return result * */ __pyx_v_result->typeinfo = __pyx_v_typeinfo; /* "View.MemoryView":618 * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo * return result # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_check') */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = ((PyObject *)__pyx_v_result); goto __pyx_L0; /* "View.MemoryView":615 * * @cname('__pyx_memoryview_new') * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<< * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":621 * * @cname('__pyx_memoryview_check') * cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<< * return isinstance(o, memoryview) * */ static CYTHON_INLINE int __pyx_memoryview_check(PyObject *__pyx_v_o) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("memoryview_check", 0); /* "View.MemoryView":622 * @cname('__pyx_memoryview_check') * cdef inline bint memoryview_check(object o): * return isinstance(o, memoryview) # <<<<<<<<<<<<<< * * cdef tuple _unellipsify(object index, int ndim): */ __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_o, __pyx_memoryview_type); __pyx_r = __pyx_t_1; goto __pyx_L0; /* "View.MemoryView":621 * * @cname('__pyx_memoryview_check') * cdef inline bint memoryview_check(object o): # 
<<<<<<<<<<<<<< * return isinstance(o, memoryview) * */ /* function exit code */ __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":624 * return isinstance(o, memoryview) * * cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<< * """ * Replace all ellipses with full slices and fill incomplete indices with */ static PyObject *_unellipsify(PyObject *__pyx_v_index, int __pyx_v_ndim) { PyObject *__pyx_v_tup = NULL; PyObject *__pyx_v_result = NULL; int __pyx_v_have_slices; int __pyx_v_seen_ellipsis; CYTHON_UNUSED PyObject *__pyx_v_idx = NULL; PyObject *__pyx_v_item = NULL; Py_ssize_t __pyx_v_nslices; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; Py_ssize_t __pyx_t_5; PyObject *(*__pyx_t_6)(PyObject *); PyObject *__pyx_t_7 = NULL; Py_ssize_t __pyx_t_8; int __pyx_t_9; int __pyx_t_10; PyObject *__pyx_t_11 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("_unellipsify", 0); /* "View.MemoryView":629 * full slices. * """ * if not isinstance(index, tuple): # <<<<<<<<<<<<<< * tup = (index,) * else: */ __pyx_t_1 = PyTuple_Check(__pyx_v_index); __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":630 * """ * if not isinstance(index, tuple): * tup = (index,) # <<<<<<<<<<<<<< * else: * tup = index */ __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 630; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(__pyx_v_index); __Pyx_GIVEREF(__pyx_v_index); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_index); __pyx_v_tup = __pyx_t_3; __pyx_t_3 = 0; /* "View.MemoryView":629 * full slices. 
* """ * if not isinstance(index, tuple): # <<<<<<<<<<<<<< * tup = (index,) * else: */ goto __pyx_L3; } /* "View.MemoryView":632 * tup = (index,) * else: * tup = index # <<<<<<<<<<<<<< * * result = [] */ /*else*/ { __Pyx_INCREF(__pyx_v_index); __pyx_v_tup = __pyx_v_index; } __pyx_L3:; /* "View.MemoryView":634 * tup = index * * result = [] # <<<<<<<<<<<<<< * have_slices = False * seen_ellipsis = False */ __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 634; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_v_result = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":635 * * result = [] * have_slices = False # <<<<<<<<<<<<<< * seen_ellipsis = False * for idx, item in enumerate(tup): */ __pyx_v_have_slices = 0; /* "View.MemoryView":636 * result = [] * have_slices = False * seen_ellipsis = False # <<<<<<<<<<<<<< * for idx, item in enumerate(tup): * if item is Ellipsis: */ __pyx_v_seen_ellipsis = 0; /* "View.MemoryView":637 * have_slices = False * seen_ellipsis = False * for idx, item in enumerate(tup): # <<<<<<<<<<<<<< * if item is Ellipsis: * if not seen_ellipsis: */ __Pyx_INCREF(__pyx_int_0); __pyx_t_3 = __pyx_int_0; if (likely(PyList_CheckExact(__pyx_v_tup)) || PyTuple_CheckExact(__pyx_v_tup)) { __pyx_t_4 = __pyx_v_tup; __Pyx_INCREF(__pyx_t_4); __pyx_t_5 = 0; __pyx_t_6 = NULL; } else { __pyx_t_5 = -1; __pyx_t_4 = PyObject_GetIter(__pyx_v_tup); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 637; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_6 = Py_TYPE(__pyx_t_4)->tp_iternext; if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 637; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } for (;;) { if (likely(!__pyx_t_6)) { if (likely(PyList_CheckExact(__pyx_t_4))) { if (__pyx_t_5 >= PyList_GET_SIZE(__pyx_t_4)) break;
#if CYTHON_COMPILING_IN_CPYTHON
__pyx_t_7 = PyList_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 637; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#else
__pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 637; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_7);
#endif
} else { if (__pyx_t_5 >= PyTuple_GET_SIZE(__pyx_t_4)) break;
#if CYTHON_COMPILING_IN_CPYTHON
__pyx_t_7 = PyTuple_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 637; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#else
__pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 637; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_7);
#endif
} } else { __pyx_t_7 = __pyx_t_6(__pyx_t_4); if (unlikely(!__pyx_t_7)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(exc_type == PyExc_StopIteration || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else {__pyx_filename = __pyx_f[1]; __pyx_lineno = 637; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } break; } __Pyx_GOTREF(__pyx_t_7); } __Pyx_XDECREF_SET(__pyx_v_item, __pyx_t_7); __pyx_t_7 = 0; __Pyx_INCREF(__pyx_t_3); __Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_3); __pyx_t_7 = __Pyx_PyInt_AddObjC(__pyx_t_3, __pyx_int_1, 1, 0); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[1]; __pyx_lineno =
637; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = __pyx_t_7; __pyx_t_7 = 0; /* "View.MemoryView":638 * seen_ellipsis = False * for idx, item in enumerate(tup): * if item is Ellipsis: # <<<<<<<<<<<<<< * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) */ __pyx_t_2 = (__pyx_v_item == __pyx_builtin_Ellipsis); __pyx_t_1 = (__pyx_t_2 != 0); if (__pyx_t_1) { /* "View.MemoryView":639 * for idx, item in enumerate(tup): * if item is Ellipsis: * if not seen_ellipsis: # <<<<<<<<<<<<<< * result.extend([slice(None)] * (ndim - len(tup) + 1)) * seen_ellipsis = True */ __pyx_t_1 = ((!(__pyx_v_seen_ellipsis != 0)) != 0); if (__pyx_t_1) { /* "View.MemoryView":640 * if item is Ellipsis: * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) # <<<<<<<<<<<<<< * seen_ellipsis = True * else: */ __pyx_t_8 = PyObject_Length(__pyx_v_tup); if (unlikely(__pyx_t_8 == -1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 640; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_7 = PyList_New(1 * ((((__pyx_v_ndim - __pyx_t_8) + 1)<0) ? 0:((__pyx_v_ndim - __pyx_t_8) + 1))); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 640; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_7); { Py_ssize_t __pyx_temp; for (__pyx_temp=0; __pyx_temp < ((__pyx_v_ndim - __pyx_t_8) + 1); __pyx_temp++) { __Pyx_INCREF(__pyx_slice__10); __Pyx_GIVEREF(__pyx_slice__10); PyList_SET_ITEM(__pyx_t_7, __pyx_temp, __pyx_slice__10); } } __pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_7); if (unlikely(__pyx_t_9 == -1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 640; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "View.MemoryView":641 * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) * seen_ellipsis = True # <<<<<<<<<<<<<< * else: * result.append(slice(None)) */ __pyx_v_seen_ellipsis = 1; /* "View.MemoryView":639 * for idx, item in enumerate(tup): * if item is Ellipsis: * if not seen_ellipsis: # <<<<<<<<<<<<<< * result.extend([slice(None)] * (ndim - len(tup) + 1)) * seen_ellipsis = True */ goto __pyx_L7; } /* "View.MemoryView":643 * seen_ellipsis = True * else: * result.append(slice(None)) # <<<<<<<<<<<<<< * have_slices = True * else: */ /*else*/ { __pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_slice__11); if (unlikely(__pyx_t_9 == -1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 643; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_L7:; /* "View.MemoryView":644 * else: * result.append(slice(None)) * have_slices = True # <<<<<<<<<<<<<< * else: * if not isinstance(item, slice) and not PyIndex_Check(item): */ __pyx_v_have_slices = 1; /* "View.MemoryView":638 * seen_ellipsis = False * for idx, item in enumerate(tup): * if item is Ellipsis: # <<<<<<<<<<<<<< * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) */ goto __pyx_L6; } /* "View.MemoryView":646 * have_slices = True * else: * if not isinstance(item, slice) and not PyIndex_Check(item): # <<<<<<<<<<<<<< * raise TypeError("Cannot index with type '%s'" % type(item)) * */ /*else*/ { __pyx_t_2 = PySlice_Check(__pyx_v_item); __pyx_t_10 = ((!(__pyx_t_2 != 0)) != 0); if (__pyx_t_10) { } else { __pyx_t_1 = __pyx_t_10; goto __pyx_L9_bool_binop_done; } __pyx_t_10 = ((!(PyIndex_Check(__pyx_v_item) != 0)) != 0); __pyx_t_1 = __pyx_t_10; __pyx_L9_bool_binop_done:; if (__pyx_t_1) { /* "View.MemoryView":647 * else: * if not isinstance(item, 
slice) and not PyIndex_Check(item): * raise TypeError("Cannot index with type '%s'" % type(item)) # <<<<<<<<<<<<<< * * have_slices = have_slices or isinstance(item, slice) */ __pyx_t_7 = __Pyx_PyString_Format(__pyx_kp_s_Cannot_index_with_type_s, ((PyObject *)Py_TYPE(__pyx_v_item))); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 647; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_7); __pyx_t_11 = PyTuple_New(1); if (unlikely(!__pyx_t_11)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 647; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_11); __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_11, 0, __pyx_t_7); __pyx_t_7 = 0; __pyx_t_7 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_t_11, NULL); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 647; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __Pyx_Raise(__pyx_t_7, 0, 0, 0); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 647; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "View.MemoryView":646 * have_slices = True * else: * if not isinstance(item, slice) and not PyIndex_Check(item): # <<<<<<<<<<<<<< * raise TypeError("Cannot index with type '%s'" % type(item)) * */ } /* "View.MemoryView":649 * raise TypeError("Cannot index with type '%s'" % type(item)) * * have_slices = have_slices or isinstance(item, slice) # <<<<<<<<<<<<<< * result.append(item) * */ __pyx_t_10 = (__pyx_v_have_slices != 0); if (!__pyx_t_10) { } else { __pyx_t_1 = __pyx_t_10; goto __pyx_L11_bool_binop_done; } __pyx_t_10 = PySlice_Check(__pyx_v_item); __pyx_t_2 = (__pyx_t_10 != 0); __pyx_t_1 = __pyx_t_2; __pyx_L11_bool_binop_done:; __pyx_v_have_slices = __pyx_t_1; /* "View.MemoryView":650 * * have_slices = have_slices or isinstance(item, slice) * result.append(item) # <<<<<<<<<<<<<< * * nslices = ndim - len(result) */ __pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_v_item); if (unlikely(__pyx_t_9 == -1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 650; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_L6:; /* "View.MemoryView":637 * have_slices = False * seen_ellipsis = False * for idx, item in enumerate(tup): # <<<<<<<<<<<<<< * if item is Ellipsis: * if not seen_ellipsis: */ } __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":652 * result.append(item) * * nslices = ndim - len(result) # <<<<<<<<<<<<<< * if nslices: * result.extend([slice(None)] * nslices) */ __pyx_t_5 = PyList_GET_SIZE(__pyx_v_result); if (unlikely(__pyx_t_5 == -1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 652; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_nslices = (__pyx_v_ndim - __pyx_t_5); /* "View.MemoryView":653 * * nslices = ndim - len(result) * if nslices: # <<<<<<<<<<<<<< * result.extend([slice(None)] * nslices) * */ __pyx_t_1 = (__pyx_v_nslices != 0); if (__pyx_t_1) { /* "View.MemoryView":654 * nslices = ndim - len(result) * if nslices: * result.extend([slice(None)] * nslices) # <<<<<<<<<<<<<< * * return have_slices or nslices, tuple(result) */ __pyx_t_3 = PyList_New(1 * ((__pyx_v_nslices<0) ? 
0:__pyx_v_nslices)); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 654; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); { Py_ssize_t __pyx_temp; for (__pyx_temp=0; __pyx_temp < __pyx_v_nslices; __pyx_temp++) { __Pyx_INCREF(__pyx_slice__12); __Pyx_GIVEREF(__pyx_slice__12); PyList_SET_ITEM(__pyx_t_3, __pyx_temp, __pyx_slice__12); } } __pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_3); if (unlikely(__pyx_t_9 == -1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 654; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":653 * * nslices = ndim - len(result) * if nslices: # <<<<<<<<<<<<<< * result.extend([slice(None)] * nslices) * */ } /* "View.MemoryView":656 * result.extend([slice(None)] * nslices) * * return have_slices or nslices, tuple(result) # <<<<<<<<<<<<<< * * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): */ __Pyx_XDECREF(__pyx_r); if (!__pyx_v_have_slices) { } else { __pyx_t_4 = __Pyx_PyBool_FromLong(__pyx_v_have_slices); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 656; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L14_bool_binop_done; } __pyx_t_4 = PyInt_FromSsize_t(__pyx_v_nslices); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 656; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __pyx_t_4; __pyx_t_4 = 0; __pyx_L14_bool_binop_done:; __pyx_t_4 = PyList_AsTuple(__pyx_v_result); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 656; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_7 = PyTuple_New(2); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 656; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_7); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_7, 1, __pyx_t_4); __pyx_t_3 = 0; __pyx_t_4 = 0; __pyx_r = ((PyObject*)__pyx_t_7); __pyx_t_7 = 0; goto __pyx_L0; /* "View.MemoryView":624 * return isinstance(o, memoryview) * * cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<< * """ * Replace all ellipses with full slices and fill incomplete indices with */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_11); __Pyx_AddTraceback("View.MemoryView._unellipsify", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_tup); __Pyx_XDECREF(__pyx_v_result); __Pyx_XDECREF(__pyx_v_idx); __Pyx_XDECREF(__pyx_v_item); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":658 * return have_slices or nslices, tuple(result) * * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): # <<<<<<<<<<<<<< * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: */ static PyObject *assert_direct_dimensions(Py_ssize_t *__pyx_v_suboffsets, int __pyx_v_ndim) { Py_ssize_t __pyx_v_suboffset; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t *__pyx_t_1; Py_ssize_t *__pyx_t_2; Py_ssize_t *__pyx_t_3; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("assert_direct_dimensions", 0); /* "View.MemoryView":659 * * cdef assert_direct_dimensions(Py_ssize_t 
*suboffsets, int ndim): * for suboffset in suboffsets[:ndim]: # <<<<<<<<<<<<<< * if suboffset >= 0: * raise ValueError("Indirect dimensions not supported") */ __pyx_t_2 = (__pyx_v_suboffsets + __pyx_v_ndim); for (__pyx_t_3 = __pyx_v_suboffsets; __pyx_t_3 < __pyx_t_2; __pyx_t_3++) { __pyx_t_1 = __pyx_t_3; __pyx_v_suboffset = (__pyx_t_1[0]); /* "View.MemoryView":660 * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: # <<<<<<<<<<<<<< * raise ValueError("Indirect dimensions not supported") * */ __pyx_t_4 = ((__pyx_v_suboffset >= 0) != 0); if (__pyx_t_4) { /* "View.MemoryView":661 * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: * raise ValueError("Indirect dimensions not supported") # <<<<<<<<<<<<<< * * */ __pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__13, NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 661; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 661; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "View.MemoryView":660 * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: # <<<<<<<<<<<<<< * raise ValueError("Indirect dimensions not supported") * */ } } /* "View.MemoryView":658 * return have_slices or nslices, tuple(result) * * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): # <<<<<<<<<<<<<< * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.assert_direct_dimensions", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":668 * * @cname('__pyx_memview_slice') * cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<< * cdef int new_ndim = 0, suboffset_dim = -1, dim * cdef bint negative_step */ static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *__pyx_v_memview, PyObject *__pyx_v_indices) { int __pyx_v_new_ndim; int __pyx_v_suboffset_dim; int __pyx_v_dim; __Pyx_memviewslice __pyx_v_src; __Pyx_memviewslice __pyx_v_dst; __Pyx_memviewslice *__pyx_v_p_src; struct __pyx_memoryviewslice_obj *__pyx_v_memviewsliceobj = 0; __Pyx_memviewslice *__pyx_v_p_dst; int *__pyx_v_p_suboffset_dim; Py_ssize_t __pyx_v_start; Py_ssize_t __pyx_v_stop; Py_ssize_t __pyx_v_step; int __pyx_v_have_start; int __pyx_v_have_stop; int __pyx_v_have_step; PyObject *__pyx_v_index = NULL; struct __pyx_memoryview_obj *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; struct __pyx_memoryview_obj *__pyx_t_4; char *__pyx_t_5; int __pyx_t_6; Py_ssize_t __pyx_t_7; PyObject *(*__pyx_t_8)(PyObject *); PyObject *__pyx_t_9 = NULL; Py_ssize_t __pyx_t_10; int __pyx_t_11; Py_ssize_t __pyx_t_12; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("memview_slice", 0); /* "View.MemoryView":669 * @cname('__pyx_memview_slice') * cdef memoryview memview_slice(memoryview memview, object indices): * cdef int new_ndim = 0, suboffset_dim = -1, dim # <<<<<<<<<<<<<< * cdef bint negative_step * cdef __Pyx_memviewslice src, dst */ 
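/* memview_slice applies an index tuple (typically prepared by _unellipsify
 * above) to a source slice, one entry per dimension: an integral index is
 * bounds-checked and collapses its axis (new_ndim is not advanced), `None`
 * injects a new length-1 axis with stride 0 and suboffset -1, and a slice
 * object has its start/stop/step normalized and applied by
 * slice_memviewslice.  Roughly NumPy-style semantics; a sketch only, with
 * `mv` a hypothetical 2-D typed memoryview:
 *
 *     mv[1, ::2]      -> 1-D view: axis 0 dropped, every 2nd element kept
 *     mv[None, :, :]  -> 3-D view: a leading length-1 axis inserted
 */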
__pyx_v_new_ndim = 0; __pyx_v_suboffset_dim = -1; /* "View.MemoryView":676 * * * memset(&dst, 0, sizeof(dst)) # <<<<<<<<<<<<<< * * cdef _memoryviewslice memviewsliceobj */ memset((&__pyx_v_dst), 0, (sizeof(__pyx_v_dst))); /* "View.MemoryView":680 * cdef _memoryviewslice memviewsliceobj * * assert memview.view.ndim > 0 # <<<<<<<<<<<<<< * * if isinstance(memview, _memoryviewslice): */
#ifndef CYTHON_WITHOUT_ASSERTIONS
if (unlikely(!Py_OptimizeFlag)) { if (unlikely(!((__pyx_v_memview->view.ndim > 0) != 0))) { PyErr_SetNone(PyExc_AssertionError); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 680; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } }
#endif
/* "View.MemoryView":682 * assert memview.view.ndim > 0 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * memviewsliceobj = memview * p_src = &memviewsliceobj.from_slice */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":683 * * if isinstance(memview, _memoryviewslice): * memviewsliceobj = memview # <<<<<<<<<<<<<< * p_src = &memviewsliceobj.from_slice * else: */ if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 683; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_3 = ((PyObject *)__pyx_v_memview); __Pyx_INCREF(__pyx_t_3); __pyx_v_memviewsliceobj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":684 * if isinstance(memview, _memoryviewslice): * memviewsliceobj = memview * p_src = &memviewsliceobj.from_slice # <<<<<<<<<<<<<< * else: * slice_copy(memview, &src) */ __pyx_v_p_src = (&__pyx_v_memviewsliceobj->from_slice); /* "View.MemoryView":682 * assert memview.view.ndim > 0 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * memviewsliceobj = memview * p_src = &memviewsliceobj.from_slice */ goto __pyx_L3; } /* "View.MemoryView":686 * p_src = &memviewsliceobj.from_slice * else: * slice_copy(memview, &src) # <<<<<<<<<<<<<< * p_src = &src * */ /*else*/ { __pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_src)); /* "View.MemoryView":687 * else: * slice_copy(memview, &src) * p_src = &src # <<<<<<<<<<<<<< * * */ __pyx_v_p_src = (&__pyx_v_src); } __pyx_L3:; /* "View.MemoryView":693 * * * dst.memview = p_src.memview # <<<<<<<<<<<<<< * dst.data = p_src.data * */ __pyx_t_4 = __pyx_v_p_src->memview; __pyx_v_dst.memview = __pyx_t_4; /* "View.MemoryView":694 * * dst.memview = p_src.memview * dst.data = p_src.data # <<<<<<<<<<<<<< * * */ __pyx_t_5 = __pyx_v_p_src->data; __pyx_v_dst.data = __pyx_t_5; /* "View.MemoryView":699 * * * cdef __Pyx_memviewslice *p_dst = &dst # <<<<<<<<<<<<<< * cdef int *p_suboffset_dim = &suboffset_dim * cdef Py_ssize_t start, stop, step */ __pyx_v_p_dst = (&__pyx_v_dst); /* "View.MemoryView":700 * * cdef __Pyx_memviewslice *p_dst = &dst * cdef int *p_suboffset_dim = &suboffset_dim # <<<<<<<<<<<<<< * cdef Py_ssize_t start, stop, step * cdef bint have_start, have_stop, have_step */ __pyx_v_p_suboffset_dim = (&__pyx_v_suboffset_dim); /* "View.MemoryView":704 * cdef bint have_start, have_stop, have_step * * for dim, index in enumerate(indices): # <<<<<<<<<<<<<< * if PyIndex_Check(index): * slice_memviewslice( */ __pyx_t_6 = 0; if (likely(PyList_CheckExact(__pyx_v_indices)) || PyTuple_CheckExact(__pyx_v_indices)) { __pyx_t_3 = __pyx_v_indices; __Pyx_INCREF(__pyx_t_3); __pyx_t_7 = 0; __pyx_t_8 = NULL; } else { __pyx_t_7 = -1; __pyx_t_3 = PyObject_GetIter(__pyx_v_indices); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 704; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_8 = Py_TYPE(__pyx_t_3)->tp_iternext; if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 704; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } for (;;) { if (likely(!__pyx_t_8)) { if (likely(PyList_CheckExact(__pyx_t_3))) { if (__pyx_t_7 >= PyList_GET_SIZE(__pyx_t_3)) break;
#if CYTHON_COMPILING_IN_CPYTHON
__pyx_t_9 = PyList_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 704; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#else
__pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 704; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_9);
#endif
} else { if (__pyx_t_7 >= PyTuple_GET_SIZE(__pyx_t_3)) break;
#if CYTHON_COMPILING_IN_CPYTHON
__pyx_t_9 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 704; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#else
__pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 704; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_9);
#endif
} } else { __pyx_t_9 = __pyx_t_8(__pyx_t_3); if (unlikely(!__pyx_t_9)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(exc_type == PyExc_StopIteration || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else {__pyx_filename = __pyx_f[1]; __pyx_lineno = 704; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } break; } __Pyx_GOTREF(__pyx_t_9); } __Pyx_XDECREF_SET(__pyx_v_index, __pyx_t_9); __pyx_t_9 = 0; __pyx_v_dim = __pyx_t_6; __pyx_t_6 = (__pyx_t_6 + 1); /* "View.MemoryView":705 * * for dim, index in enumerate(indices): * if PyIndex_Check(index): # <<<<<<<<<<<<<< * slice_memviewslice( * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], */ __pyx_t_2 = (PyIndex_Check(__pyx_v_index) != 0); if (__pyx_t_2) { /* "View.MemoryView":709 * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], * dim, new_ndim, p_suboffset_dim, * index, 0, 0, # start, stop, step # <<<<<<<<<<<<<< * 0, 0, 0, # have_{start,stop,step} * False) */ __pyx_t_10 = __Pyx_PyIndex_AsSsize_t(__pyx_v_index); if (unlikely((__pyx_t_10 == (Py_ssize_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 709; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "View.MemoryView":706 * for dim, index in enumerate(indices): * if PyIndex_Check(index): * slice_memviewslice( # <<<<<<<<<<<<<< * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], * dim, new_ndim, p_suboffset_dim, */ __pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_t_10, 0, 0, 0, 0, 0, 0); if (unlikely(__pyx_t_11 == -1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 706; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "View.MemoryView":705 * * for dim, index in enumerate(indices): * if PyIndex_Check(index): # <<<<<<<<<<<<<< * slice_memviewslice( * p_dst, p_src.shape[dim], p_src.strides[dim],
p_src.suboffsets[dim], */ goto __pyx_L6; } /* "View.MemoryView":712 * 0, 0, 0, # have_{start,stop,step} * False) * elif index is None: # <<<<<<<<<<<<<< * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 */ __pyx_t_2 = (__pyx_v_index == Py_None); __pyx_t_1 = (__pyx_t_2 != 0); if (__pyx_t_1) { /* "View.MemoryView":713 * False) * elif index is None: * p_dst.shape[new_ndim] = 1 # <<<<<<<<<<<<<< * p_dst.strides[new_ndim] = 0 * p_dst.suboffsets[new_ndim] = -1 */ (__pyx_v_p_dst->shape[__pyx_v_new_ndim]) = 1; /* "View.MemoryView":714 * elif index is None: * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 # <<<<<<<<<<<<<< * p_dst.suboffsets[new_ndim] = -1 * new_ndim += 1 */ (__pyx_v_p_dst->strides[__pyx_v_new_ndim]) = 0; /* "View.MemoryView":715 * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 * p_dst.suboffsets[new_ndim] = -1 # <<<<<<<<<<<<<< * new_ndim += 1 * else: */ (__pyx_v_p_dst->suboffsets[__pyx_v_new_ndim]) = -1; /* "View.MemoryView":716 * p_dst.strides[new_ndim] = 0 * p_dst.suboffsets[new_ndim] = -1 * new_ndim += 1 # <<<<<<<<<<<<<< * else: * start = index.start or 0 */ __pyx_v_new_ndim = (__pyx_v_new_ndim + 1); /* "View.MemoryView":712 * 0, 0, 0, # have_{start,stop,step} * False) * elif index is None: # <<<<<<<<<<<<<< * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 */ goto __pyx_L6; } /* "View.MemoryView":718 * new_ndim += 1 * else: * start = index.start or 0 # <<<<<<<<<<<<<< * stop = index.stop or 0 * step = index.step or 0 */ /*else*/ { __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 718; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 718; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (!__pyx_t_1) { __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } else { __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 718; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_10 = __pyx_t_12; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; goto __pyx_L7_bool_binop_done; } __pyx_t_10 = 0; __pyx_L7_bool_binop_done:; __pyx_v_start = __pyx_t_10; /* "View.MemoryView":719 * else: * start = index.start or 0 * stop = index.stop or 0 # <<<<<<<<<<<<<< * step = index.step or 0 * */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 719; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 719; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (!__pyx_t_1) { __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } else { __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 719; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_10 = __pyx_t_12; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; goto __pyx_L9_bool_binop_done; } __pyx_t_10 = 0; __pyx_L9_bool_binop_done:; __pyx_v_stop = __pyx_t_10; /* "View.MemoryView":720 * start = index.start or 0 * stop = index.stop or 0 * step = index.step or 0 # <<<<<<<<<<<<<< * * have_start = index.start is not None */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if 
(unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 720; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 720; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (!__pyx_t_1) { __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } else { __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 720; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_10 = __pyx_t_12; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; goto __pyx_L11_bool_binop_done; } __pyx_t_10 = 0; __pyx_L11_bool_binop_done:; __pyx_v_step = __pyx_t_10; /* "View.MemoryView":722 * step = index.step or 0 * * have_start = index.start is not None # <<<<<<<<<<<<<< * have_stop = index.stop is not None * have_step = index.step is not None */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 722; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = (__pyx_t_9 != Py_None); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_v_have_start = __pyx_t_1; /* "View.MemoryView":723 * * have_start = index.start is not None * have_stop = index.stop is not None # <<<<<<<<<<<<<< * have_step = index.step is not None * */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 723; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = (__pyx_t_9 != Py_None); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_v_have_stop = __pyx_t_1; /* "View.MemoryView":724 * have_start = index.start is not None * have_stop = index.stop is not None * have_step = index.step is not None # <<<<<<<<<<<<<< * * slice_memviewslice( */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 724; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = (__pyx_t_9 != Py_None); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_v_have_step = __pyx_t_1; /* "View.MemoryView":726 * have_step = index.step is not None * * slice_memviewslice( # <<<<<<<<<<<<<< * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], * dim, new_ndim, p_suboffset_dim, */ __pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_v_start, __pyx_v_stop, __pyx_v_step, __pyx_v_have_start, __pyx_v_have_stop, __pyx_v_have_step, 1); if (unlikely(__pyx_t_11 == -1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 726; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "View.MemoryView":732 * have_start, have_stop, have_step, * True) * new_ndim += 1 # <<<<<<<<<<<<<< * * if isinstance(memview, _memoryviewslice): */ __pyx_v_new_ndim = (__pyx_v_new_ndim + 1); } __pyx_L6:; /* "View.MemoryView":704 * cdef bint have_start, have_stop, have_step * * for dim, index in enumerate(indices): # <<<<<<<<<<<<<< * if PyIndex_Check(index): * slice_memviewslice( */ } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":734 * new_ndim += 1 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * return memoryview_fromslice(dst, new_ndim, * 
memviewsliceobj.to_object_func, */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":735 * * if isinstance(memview, _memoryviewslice): * return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<< * memviewsliceobj.to_object_func, * memviewsliceobj.to_dtype_func, */ __Pyx_XDECREF(((PyObject *)__pyx_r)); /* "View.MemoryView":736 * if isinstance(memview, _memoryviewslice): * return memoryview_fromslice(dst, new_ndim, * memviewsliceobj.to_object_func, # <<<<<<<<<<<<<< * memviewsliceobj.to_dtype_func, * memview.dtype_is_object) */ if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 736; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } /* "View.MemoryView":737 * return memoryview_fromslice(dst, new_ndim, * memviewsliceobj.to_object_func, * memviewsliceobj.to_dtype_func, # <<<<<<<<<<<<<< * memview.dtype_is_object) * else: */ if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 737; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } /* "View.MemoryView":735 * * if isinstance(memview, _memoryviewslice): * return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<< * memviewsliceobj.to_object_func, * memviewsliceobj.to_dtype_func, */ __pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, __pyx_v_memviewsliceobj->to_object_func, __pyx_v_memviewsliceobj->to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 735; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 735; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":734 * new_ndim += 1 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * return memoryview_fromslice(dst, new_ndim, * memviewsliceobj.to_object_func, */ } /* "View.MemoryView":740 * memview.dtype_is_object) * else: * return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<< * memview.dtype_is_object) * */ /*else*/ { __Pyx_XDECREF(((PyObject *)__pyx_r)); /* "View.MemoryView":741 * else: * return memoryview_fromslice(dst, new_ndim, NULL, NULL, * memview.dtype_is_object) # <<<<<<<<<<<<<< * * */ __pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, NULL, NULL, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 740; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); /* "View.MemoryView":740 * memview.dtype_is_object) * else: * return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<< * memview.dtype_is_object) * */ if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 740; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L0; } /* "View.MemoryView":668 * * @cname('__pyx_memview_slice') * cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<< * cdef int new_ndim = 0, suboffset_dim = -1, dim * cdef bint negative_step */ /* function exit code */ 
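/* Return path above: when the source is itself a _memoryviewslice, its
 * to_object_func/to_dtype_func converters are propagated into the freshly
 * built view so element packing keeps round-tripping through the original
 * dtype; a plain memoryview passes NULL for both and relies on the default
 * handling in memoryview_fromslice. */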
__pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_9); __Pyx_AddTraceback("View.MemoryView.memview_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_memviewsliceobj); __Pyx_XDECREF(__pyx_v_index); __Pyx_XGIVEREF((PyObject *)__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":765 * * @cname('__pyx_memoryview_slice_memviewslice') * cdef int slice_memviewslice( # <<<<<<<<<<<<<< * __Pyx_memviewslice *dst, * Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset, */ static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *__pyx_v_dst, Py_ssize_t __pyx_v_shape, Py_ssize_t __pyx_v_stride, Py_ssize_t __pyx_v_suboffset, int __pyx_v_dim, int __pyx_v_new_ndim, int *__pyx_v_suboffset_dim, Py_ssize_t __pyx_v_start, Py_ssize_t __pyx_v_stop, Py_ssize_t __pyx_v_step, int __pyx_v_have_start, int __pyx_v_have_stop, int __pyx_v_have_step, int __pyx_v_is_slice) { Py_ssize_t __pyx_v_new_shape; int __pyx_v_negative_step; int __pyx_r; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; /* "View.MemoryView":785 * cdef bint negative_step * * if not is_slice: # <<<<<<<<<<<<<< * * if start < 0: */ __pyx_t_1 = ((!(__pyx_v_is_slice != 0)) != 0); if (__pyx_t_1) { /* "View.MemoryView":787 * if not is_slice: * * if start < 0: # <<<<<<<<<<<<<< * start += shape * if not 0 <= start < shape: */ __pyx_t_1 = ((__pyx_v_start < 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":788 * * if start < 0: * start += shape # <<<<<<<<<<<<<< * if not 0 <= start < shape: * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) */ __pyx_v_start = (__pyx_v_start + __pyx_v_shape); /* "View.MemoryView":787 * if not is_slice: * * if start < 0: # <<<<<<<<<<<<<< * start += shape * if not 0 <= start < shape: */ } /* "View.MemoryView":789 * if start < 0: * start += shape * if not 0 <= start < shape: # <<<<<<<<<<<<<< * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) * else: */ __pyx_t_1 = (0 <= __pyx_v_start); if (__pyx_t_1) { __pyx_t_1 = (__pyx_v_start < __pyx_v_shape); } __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":790 * start += shape * if not 0 <= start < shape: * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) # <<<<<<<<<<<<<< * else: * */ __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, __pyx_k_Index_out_of_bounds_axis_d, __pyx_v_dim); if (unlikely(__pyx_t_3 == -1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 790; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "View.MemoryView":789 * if start < 0: * start += shape * if not 0 <= start < shape: # <<<<<<<<<<<<<< * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) * else: */ } /* "View.MemoryView":785 * cdef bint negative_step * * if not is_slice: # <<<<<<<<<<<<<< * * if start < 0: */ goto __pyx_L3; } /* "View.MemoryView":793 * else: * * negative_step = have_step != 0 and step < 0 # <<<<<<<<<<<<<< * * if have_step and step == 0: */ /*else*/ { __pyx_t_1 = ((__pyx_v_have_step != 0) != 0); if (__pyx_t_1) { } else { __pyx_t_2 = __pyx_t_1; goto __pyx_L6_bool_binop_done; } __pyx_t_1 = ((__pyx_v_step < 0) != 0); __pyx_t_2 = __pyx_t_1; __pyx_L6_bool_binop_done:; __pyx_v_negative_step = __pyx_t_2; /* "View.MemoryView":795 * negative_step = have_step != 0 and step < 0 * * if have_step and step == 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, "Step may not be zero (axis %d)", dim) * */ __pyx_t_1 = (__pyx_v_have_step != 0); if (__pyx_t_1) { } 
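/* Cython lowers `have_step and step == 0` into this staged short-circuit:
 * each operand is evaluated into __pyx_t_2 and control jumps to the shared
 * __pyx_L9_bool_binop_done label as soon as the outcome is decided, matching
 * Python's lazy `and`.  The rest of slice_memviewslice normalizes bounds the
 * way Python slicing does: negative start/stop wrap once by +shape and are
 * clamped into range, absent bounds get direction-aware defaults (shape-1
 * and -1 for a negative step, 0 and shape otherwise), a missing step
 * defaults to 1, and the new extent is the truncating quotient of
 * (stop - start) over step, bumped by one when a remainder is left over,
 * i.e. a ceiling division in the step's direction. */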
else { __pyx_t_2 = __pyx_t_1; goto __pyx_L9_bool_binop_done; } __pyx_t_1 = ((__pyx_v_step == 0) != 0); __pyx_t_2 = __pyx_t_1; __pyx_L9_bool_binop_done:; if (__pyx_t_2) { /* "View.MemoryView":796 * * if have_step and step == 0: * _err_dim(ValueError, "Step may not be zero (axis %d)", dim) # <<<<<<<<<<<<<< * * */ __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, __pyx_k_Step_may_not_be_zero_axis_d, __pyx_v_dim); if (unlikely(__pyx_t_3 == -1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "View.MemoryView":795 * negative_step = have_step != 0 and step < 0 * * if have_step and step == 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, "Step may not be zero (axis %d)", dim) * */ } /* "View.MemoryView":799 * * * if have_start: # <<<<<<<<<<<<<< * if start < 0: * start += shape */ __pyx_t_2 = (__pyx_v_have_start != 0); if (__pyx_t_2) { /* "View.MemoryView":800 * * if have_start: * if start < 0: # <<<<<<<<<<<<<< * start += shape * if start < 0: */ __pyx_t_2 = ((__pyx_v_start < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":801 * if have_start: * if start < 0: * start += shape # <<<<<<<<<<<<<< * if start < 0: * start = 0 */ __pyx_v_start = (__pyx_v_start + __pyx_v_shape); /* "View.MemoryView":802 * if start < 0: * start += shape * if start < 0: # <<<<<<<<<<<<<< * start = 0 * elif start >= shape: */ __pyx_t_2 = ((__pyx_v_start < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":803 * start += shape * if start < 0: * start = 0 # <<<<<<<<<<<<<< * elif start >= shape: * if negative_step: */ __pyx_v_start = 0; /* "View.MemoryView":802 * if start < 0: * start += shape * if start < 0: # <<<<<<<<<<<<<< * start = 0 * elif start >= shape: */ } /* "View.MemoryView":800 * * if have_start: * if start < 0: # <<<<<<<<<<<<<< * start += shape * if start < 0: */ goto __pyx_L12; } /* "View.MemoryView":804 * if start < 0: * start = 0 * elif start >= shape: # <<<<<<<<<<<<<< * if negative_step: * start = shape - 1 */ __pyx_t_2 = ((__pyx_v_start >= __pyx_v_shape) != 0); if (__pyx_t_2) { /* "View.MemoryView":805 * start = 0 * elif start >= shape: * if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ __pyx_t_2 = (__pyx_v_negative_step != 0); if (__pyx_t_2) { /* "View.MemoryView":806 * elif start >= shape: * if negative_step: * start = shape - 1 # <<<<<<<<<<<<<< * else: * start = shape */ __pyx_v_start = (__pyx_v_shape - 1); /* "View.MemoryView":805 * start = 0 * elif start >= shape: * if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ goto __pyx_L14; } /* "View.MemoryView":808 * start = shape - 1 * else: * start = shape # <<<<<<<<<<<<<< * else: * if negative_step: */ /*else*/ { __pyx_v_start = __pyx_v_shape; } __pyx_L14:; /* "View.MemoryView":804 * if start < 0: * start = 0 * elif start >= shape: # <<<<<<<<<<<<<< * if negative_step: * start = shape - 1 */ } __pyx_L12:; /* "View.MemoryView":799 * * * if have_start: # <<<<<<<<<<<<<< * if start < 0: * start += shape */ goto __pyx_L11; } /* "View.MemoryView":810 * start = shape * else: * if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ /*else*/ { __pyx_t_2 = (__pyx_v_negative_step != 0); if (__pyx_t_2) { /* "View.MemoryView":811 * else: * if negative_step: * start = shape - 1 # <<<<<<<<<<<<<< * else: * start = 0 */ __pyx_v_start = (__pyx_v_shape - 1); /* "View.MemoryView":810 * start = shape * else: * if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ goto __pyx_L15; } /* "View.MemoryView":813 * start = shape - 1 * else: * start = 0 # <<<<<<<<<<<<<< * * if 
have_stop: */ /*else*/ { __pyx_v_start = 0; } __pyx_L15:; } __pyx_L11:; /* "View.MemoryView":815 * start = 0 * * if have_stop: # <<<<<<<<<<<<<< * if stop < 0: * stop += shape */ __pyx_t_2 = (__pyx_v_have_stop != 0); if (__pyx_t_2) { /* "View.MemoryView":816 * * if have_stop: * if stop < 0: # <<<<<<<<<<<<<< * stop += shape * if stop < 0: */ __pyx_t_2 = ((__pyx_v_stop < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":817 * if have_stop: * if stop < 0: * stop += shape # <<<<<<<<<<<<<< * if stop < 0: * stop = 0 */ __pyx_v_stop = (__pyx_v_stop + __pyx_v_shape); /* "View.MemoryView":818 * if stop < 0: * stop += shape * if stop < 0: # <<<<<<<<<<<<<< * stop = 0 * elif stop > shape: */ __pyx_t_2 = ((__pyx_v_stop < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":819 * stop += shape * if stop < 0: * stop = 0 # <<<<<<<<<<<<<< * elif stop > shape: * stop = shape */ __pyx_v_stop = 0; /* "View.MemoryView":818 * if stop < 0: * stop += shape * if stop < 0: # <<<<<<<<<<<<<< * stop = 0 * elif stop > shape: */ } /* "View.MemoryView":816 * * if have_stop: * if stop < 0: # <<<<<<<<<<<<<< * stop += shape * if stop < 0: */ goto __pyx_L17; } /* "View.MemoryView":820 * if stop < 0: * stop = 0 * elif stop > shape: # <<<<<<<<<<<<<< * stop = shape * else: */ __pyx_t_2 = ((__pyx_v_stop > __pyx_v_shape) != 0); if (__pyx_t_2) { /* "View.MemoryView":821 * stop = 0 * elif stop > shape: * stop = shape # <<<<<<<<<<<<<< * else: * if negative_step: */ __pyx_v_stop = __pyx_v_shape; /* "View.MemoryView":820 * if stop < 0: * stop = 0 * elif stop > shape: # <<<<<<<<<<<<<< * stop = shape * else: */ } __pyx_L17:; /* "View.MemoryView":815 * start = 0 * * if have_stop: # <<<<<<<<<<<<<< * if stop < 0: * stop += shape */ goto __pyx_L16; } /* "View.MemoryView":823 * stop = shape * else: * if negative_step: # <<<<<<<<<<<<<< * stop = -1 * else: */ /*else*/ { __pyx_t_2 = (__pyx_v_negative_step != 0); if (__pyx_t_2) { /* "View.MemoryView":824 * else: * if negative_step: * stop = -1 # <<<<<<<<<<<<<< * else: * stop = shape */ __pyx_v_stop = -1; /* "View.MemoryView":823 * stop = shape * else: * if negative_step: # <<<<<<<<<<<<<< * stop = -1 * else: */ goto __pyx_L19; } /* "View.MemoryView":826 * stop = -1 * else: * stop = shape # <<<<<<<<<<<<<< * * if not have_step: */ /*else*/ { __pyx_v_stop = __pyx_v_shape; } __pyx_L19:; } __pyx_L16:; /* "View.MemoryView":828 * stop = shape * * if not have_step: # <<<<<<<<<<<<<< * step = 1 * */ __pyx_t_2 = ((!(__pyx_v_have_step != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":829 * * if not have_step: * step = 1 # <<<<<<<<<<<<<< * * */ __pyx_v_step = 1; /* "View.MemoryView":828 * stop = shape * * if not have_step: # <<<<<<<<<<<<<< * step = 1 * */ } /* "View.MemoryView":833 * * with cython.cdivision(True): * new_shape = (stop - start) // step # <<<<<<<<<<<<<< * * if (stop - start) - step * new_shape: */ __pyx_v_new_shape = ((__pyx_v_stop - __pyx_v_start) / __pyx_v_step); /* "View.MemoryView":835 * new_shape = (stop - start) // step * * if (stop - start) - step * new_shape: # <<<<<<<<<<<<<< * new_shape += 1 * */ __pyx_t_2 = (((__pyx_v_stop - __pyx_v_start) - (__pyx_v_step * __pyx_v_new_shape)) != 0); if (__pyx_t_2) { /* "View.MemoryView":836 * * if (stop - start) - step * new_shape: * new_shape += 1 # <<<<<<<<<<<<<< * * if new_shape < 0: */ __pyx_v_new_shape = (__pyx_v_new_shape + 1); /* "View.MemoryView":835 * new_shape = (stop - start) // step * * if (stop - start) - step * new_shape: # <<<<<<<<<<<<<< * new_shape += 1 * */ } /* "View.MemoryView":838 * new_shape += 1 * * if new_shape < 0: # <<<<<<<<<<<<<< * 
new_shape = 0 * */ __pyx_t_2 = ((__pyx_v_new_shape < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":839 * * if new_shape < 0: * new_shape = 0 # <<<<<<<<<<<<<< * * */ __pyx_v_new_shape = 0; /* "View.MemoryView":838 * new_shape += 1 * * if new_shape < 0: # <<<<<<<<<<<<<< * new_shape = 0 * */ } /* "View.MemoryView":842 * * * dst.strides[new_ndim] = stride * step # <<<<<<<<<<<<<< * dst.shape[new_ndim] = new_shape * dst.suboffsets[new_ndim] = suboffset */ (__pyx_v_dst->strides[__pyx_v_new_ndim]) = (__pyx_v_stride * __pyx_v_step); /* "View.MemoryView":843 * * dst.strides[new_ndim] = stride * step * dst.shape[new_ndim] = new_shape # <<<<<<<<<<<<<< * dst.suboffsets[new_ndim] = suboffset * */ (__pyx_v_dst->shape[__pyx_v_new_ndim]) = __pyx_v_new_shape; /* "View.MemoryView":844 * dst.strides[new_ndim] = stride * step * dst.shape[new_ndim] = new_shape * dst.suboffsets[new_ndim] = suboffset # <<<<<<<<<<<<<< * * */ (__pyx_v_dst->suboffsets[__pyx_v_new_ndim]) = __pyx_v_suboffset; } __pyx_L3:; /* "View.MemoryView":847 * * * if suboffset_dim[0] < 0: # <<<<<<<<<<<<<< * dst.data += start * stride * else: */ __pyx_t_2 = (((__pyx_v_suboffset_dim[0]) < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":848 * * if suboffset_dim[0] < 0: * dst.data += start * stride # <<<<<<<<<<<<<< * else: * dst.suboffsets[suboffset_dim[0]] += start * stride */ __pyx_v_dst->data = (__pyx_v_dst->data + (__pyx_v_start * __pyx_v_stride)); /* "View.MemoryView":847 * * * if suboffset_dim[0] < 0: # <<<<<<<<<<<<<< * dst.data += start * stride * else: */ goto __pyx_L23; } /* "View.MemoryView":850 * dst.data += start * stride * else: * dst.suboffsets[suboffset_dim[0]] += start * stride # <<<<<<<<<<<<<< * * if suboffset >= 0: */ /*else*/ { __pyx_t_3 = (__pyx_v_suboffset_dim[0]); (__pyx_v_dst->suboffsets[__pyx_t_3]) = ((__pyx_v_dst->suboffsets[__pyx_t_3]) + (__pyx_v_start * __pyx_v_stride)); } __pyx_L23:; /* "View.MemoryView":852 * dst.suboffsets[suboffset_dim[0]] += start * stride * * if suboffset >= 0: # <<<<<<<<<<<<<< * if not is_slice: * if new_ndim == 0: */ __pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":853 * * if suboffset >= 0: * if not is_slice: # <<<<<<<<<<<<<< * if new_ndim == 0: * dst.data = (<char **> dst.data)[0] + suboffset */ __pyx_t_2 = ((!(__pyx_v_is_slice != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":854 * if suboffset >= 0: * if not is_slice: * if new_ndim == 0: # <<<<<<<<<<<<<< * dst.data = (<char **> dst.data)[0] + suboffset * else: */ __pyx_t_2 = ((__pyx_v_new_ndim == 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":855 * if not is_slice: * if new_ndim == 0: * dst.data = (<char **> dst.data)[0] + suboffset # <<<<<<<<<<<<<< * else: * _err_dim(IndexError, "All dimensions preceding dimension %d " */ __pyx_v_dst->data = ((((char **)__pyx_v_dst->data)[0]) + __pyx_v_suboffset); /* "View.MemoryView":854 * if suboffset >= 0: * if not is_slice: * if new_ndim == 0: # <<<<<<<<<<<<<< * dst.data = (<char **> dst.data)[0] + suboffset * else: */ goto __pyx_L26; } /* "View.MemoryView":857 * dst.data = (<char **> dst.data)[0] + suboffset * else: * _err_dim(IndexError, "All dimensions preceding dimension %d " # <<<<<<<<<<<<<< * "must be indexed and not sliced", dim) * else: */ /*else*/ { /* "View.MemoryView":858 * else: * _err_dim(IndexError, "All dimensions preceding dimension %d " * "must be indexed and not sliced", dim) # <<<<<<<<<<<<<< * else: * suboffset_dim[0] = new_ndim */ __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, __pyx_k_All_dimensions_preceding_dimensi, __pyx_v_dim); 
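/* Editorial note: `suboffsets` implement the PIL-style indirect addressing
 * described in PEP 3118: when suboffsets[dim] >= 0, the address reached
 * through that dimension's stride holds a pointer that must be dereferenced
 * and then advanced by the suboffset. Illustrative element lookup, as a
 * sketch only:
 *
 *   char *p = buf;
 *   for (int d = 0; d < ndim; d++) {
 *       p += index[d] * strides[d];
 *       if (suboffsets[d] >= 0)
 *           p = *(char **)p + suboffsets[d];   // indirect dimension
 *   }
 */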
if (unlikely(__pyx_t_3 == -1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 857; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_L26:; /* "View.MemoryView":853 * * if suboffset >= 0: * if not is_slice: # <<<<<<<<<<<<<< * if new_ndim == 0: * dst.data = (<char **> dst.data)[0] + suboffset */ goto __pyx_L25; } /* "View.MemoryView":860 * "must be indexed and not sliced", dim) * else: * suboffset_dim[0] = new_ndim # <<<<<<<<<<<<<< * * return 0 */ /*else*/ { (__pyx_v_suboffset_dim[0]) = __pyx_v_new_ndim; } __pyx_L25:; /* "View.MemoryView":852 * dst.suboffsets[suboffset_dim[0]] += start * stride * * if suboffset >= 0: # <<<<<<<<<<<<<< * if not is_slice: * if new_ndim == 0: */ } /* "View.MemoryView":862 * suboffset_dim[0] = new_ndim * * return 0 # <<<<<<<<<<<<<< * * */ __pyx_r = 0; goto __pyx_L0; /* "View.MemoryView":765 * * @cname('__pyx_memoryview_slice_memviewslice') * cdef int slice_memviewslice( # <<<<<<<<<<<<<< * __Pyx_memviewslice *dst, * Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset, */ /* function exit code */ __pyx_L1_error:; {
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
__Pyx_AddTraceback("View.MemoryView.slice_memviewslice", __pyx_clineno, __pyx_lineno, __pyx_filename);
#ifdef WITH_THREAD
PyGILState_Release(__pyx_gilstate_save);
#endif
} __pyx_r = -1; __pyx_L0:; return __pyx_r; } /* "View.MemoryView":868 * * @cname('__pyx_pybuffer_index') * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<< * Py_ssize_t dim) except NULL: * cdef Py_ssize_t shape, stride, suboffset = -1 */ static char *__pyx_pybuffer_index(Py_buffer *__pyx_v_view, char *__pyx_v_bufp, Py_ssize_t __pyx_v_index, Py_ssize_t __pyx_v_dim) { Py_ssize_t __pyx_v_shape; Py_ssize_t __pyx_v_stride; Py_ssize_t __pyx_v_suboffset; Py_ssize_t __pyx_v_itemsize; char *__pyx_v_resultp; char *__pyx_r; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("pybuffer_index", 0); /* "View.MemoryView":870 * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, * Py_ssize_t dim) except NULL: * cdef Py_ssize_t shape, stride, suboffset = -1 # <<<<<<<<<<<<<< * cdef Py_ssize_t itemsize = view.itemsize * cdef char *resultp */ __pyx_v_suboffset = -1; /* "View.MemoryView":871 * Py_ssize_t dim) except NULL: * cdef Py_ssize_t shape, stride, suboffset = -1 * cdef Py_ssize_t itemsize = view.itemsize # <<<<<<<<<<<<<< * cdef char *resultp * */ __pyx_t_1 = __pyx_v_view->itemsize; __pyx_v_itemsize = __pyx_t_1; /* "View.MemoryView":874 * cdef char *resultp * * if view.ndim == 0: # <<<<<<<<<<<<<< * shape = view.len / itemsize * stride = itemsize */ __pyx_t_2 = ((__pyx_v_view->ndim == 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":875 * * if view.ndim == 0: * shape = view.len / itemsize # <<<<<<<<<<<<<< * stride = itemsize * else: */ if (unlikely(__pyx_v_itemsize == 0)) { PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 875; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && unlikely(__pyx_v_itemsize == (Py_ssize_t)-1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_view->len))) { PyErr_SetString(PyExc_OverflowError, "value too large to perform division"); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 875; __pyx_clineno = __LINE__;
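/* Editorial note: the two guards above are Cython's checked Py_ssize_t
 * division for `view.len / itemsize`: a zero itemsize raises
 * ZeroDivisionError, and the single overflowing signed case (a quotient of
 * PY_SSIZE_T_MIN / -1, detected via UNARY_NEG_WOULD_OVERFLOW) raises
 * OverflowError before the C division is attempted, since that quotient is
 * undefined behaviour in C. */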
goto __pyx_L1_error;} } __pyx_v_shape = (__pyx_v_view->len / __pyx_v_itemsize); /* "View.MemoryView":876 * if view.ndim == 0: * shape = view.len / itemsize * stride = itemsize # <<<<<<<<<<<<<< * else: * shape = view.shape[dim] */ __pyx_v_stride = __pyx_v_itemsize; /* "View.MemoryView":874 * cdef char *resultp * * if view.ndim == 0: # <<<<<<<<<<<<<< * shape = view.len / itemsize * stride = itemsize */ goto __pyx_L3; } /* "View.MemoryView":878 * stride = itemsize * else: * shape = view.shape[dim] # <<<<<<<<<<<<<< * stride = view.strides[dim] * if view.suboffsets != NULL: */ /*else*/ { __pyx_v_shape = (__pyx_v_view->shape[__pyx_v_dim]); /* "View.MemoryView":879 * else: * shape = view.shape[dim] * stride = view.strides[dim] # <<<<<<<<<<<<<< * if view.suboffsets != NULL: * suboffset = view.suboffsets[dim] */ __pyx_v_stride = (__pyx_v_view->strides[__pyx_v_dim]); /* "View.MemoryView":880 * shape = view.shape[dim] * stride = view.strides[dim] * if view.suboffsets != NULL: # <<<<<<<<<<<<<< * suboffset = view.suboffsets[dim] * */ __pyx_t_2 = ((__pyx_v_view->suboffsets != NULL) != 0); if (__pyx_t_2) { /* "View.MemoryView":881 * stride = view.strides[dim] * if view.suboffsets != NULL: * suboffset = view.suboffsets[dim] # <<<<<<<<<<<<<< * * if index < 0: */ __pyx_v_suboffset = (__pyx_v_view->suboffsets[__pyx_v_dim]); /* "View.MemoryView":880 * shape = view.shape[dim] * stride = view.strides[dim] * if view.suboffsets != NULL: # <<<<<<<<<<<<<< * suboffset = view.suboffsets[dim] * */ } } __pyx_L3:; /* "View.MemoryView":883 * suboffset = view.suboffsets[dim] * * if index < 0: # <<<<<<<<<<<<<< * index += view.shape[dim] * if index < 0: */ __pyx_t_2 = ((__pyx_v_index < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":884 * * if index < 0: * index += view.shape[dim] # <<<<<<<<<<<<<< * if index < 0: * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) */ __pyx_v_index = (__pyx_v_index + (__pyx_v_view->shape[__pyx_v_dim])); /* "View.MemoryView":885 * if index < 0: * index += view.shape[dim] * if index < 0: # <<<<<<<<<<<<<< * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * */ __pyx_t_2 = ((__pyx_v_index < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":886 * index += view.shape[dim] * if index < 0: * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) # <<<<<<<<<<<<<< * * if index >= shape: */ __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 886; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_3); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 886; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 886; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_IndexError, __pyx_t_3, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 886; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 886; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* 
"View.MemoryView":885 * if index < 0: * index += view.shape[dim] * if index < 0: # <<<<<<<<<<<<<< * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * */ } /* "View.MemoryView":883 * suboffset = view.suboffsets[dim] * * if index < 0: # <<<<<<<<<<<<<< * index += view.shape[dim] * if index < 0: */ } /* "View.MemoryView":888 * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * * if index >= shape: # <<<<<<<<<<<<<< * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * */ __pyx_t_2 = ((__pyx_v_index >= __pyx_v_shape) != 0); if (__pyx_t_2) { /* "View.MemoryView":889 * * if index >= shape: * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) # <<<<<<<<<<<<<< * * resultp = bufp + index * stride */ __pyx_t_4 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 889; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_4); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 889; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 889; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_IndexError, __pyx_t_4, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 889; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 889; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "View.MemoryView":888 * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * * if index >= shape: # <<<<<<<<<<<<<< * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * */ } /* "View.MemoryView":891 * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * * resultp = bufp + index * stride # <<<<<<<<<<<<<< * if suboffset >= 0: * resultp = (<char **> resultp)[0] + suboffset */ __pyx_v_resultp = (__pyx_v_bufp + (__pyx_v_index * __pyx_v_stride)); /* "View.MemoryView":892 * * resultp = bufp + index * stride * if suboffset >= 0: # <<<<<<<<<<<<<< * resultp = (<char **> resultp)[0] + suboffset * */ __pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":893 * resultp = bufp + index * stride * if suboffset >= 0: * resultp = (<char **> resultp)[0] + suboffset # <<<<<<<<<<<<<< * * return resultp */ __pyx_v_resultp = ((((char **)__pyx_v_resultp)[0]) + __pyx_v_suboffset); /* "View.MemoryView":892 * * resultp = bufp + index * stride * if suboffset >= 0: # <<<<<<<<<<<<<< * resultp = (<char **> resultp)[0] + suboffset * */ } /* "View.MemoryView":895 * resultp = (<char **> resultp)[0] + suboffset * * return resultp # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_resultp; goto __pyx_L0; /* "View.MemoryView":868 * * @cname('__pyx_pybuffer_index') * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<< * Py_ssize_t dim) except NULL: * cdef Py_ssize_t shape, stride, suboffset = -1 */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); 
__Pyx_AddTraceback("View.MemoryView.pybuffer_index", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":901 * * @cname('__pyx_memslice_transpose') * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: # <<<<<<<<<<<<<< * cdef int ndim = memslice.memview.view.ndim * */ static int __pyx_memslice_transpose(__Pyx_memviewslice *__pyx_v_memslice) { int __pyx_v_ndim; Py_ssize_t *__pyx_v_shape; Py_ssize_t *__pyx_v_strides; int __pyx_v_i; int __pyx_v_j; int __pyx_r; int __pyx_t_1; Py_ssize_t *__pyx_t_2; long __pyx_t_3; Py_ssize_t __pyx_t_4; Py_ssize_t __pyx_t_5; int __pyx_t_6; int __pyx_t_7; int __pyx_t_8; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; /* "View.MemoryView":902 * @cname('__pyx_memslice_transpose') * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: * cdef int ndim = memslice.memview.view.ndim # <<<<<<<<<<<<<< * * cdef Py_ssize_t *shape = memslice.shape */ __pyx_t_1 = __pyx_v_memslice->memview->view.ndim; __pyx_v_ndim = __pyx_t_1; /* "View.MemoryView":904 * cdef int ndim = memslice.memview.view.ndim * * cdef Py_ssize_t *shape = memslice.shape # <<<<<<<<<<<<<< * cdef Py_ssize_t *strides = memslice.strides * */ __pyx_t_2 = __pyx_v_memslice->shape; __pyx_v_shape = __pyx_t_2; /* "View.MemoryView":905 * * cdef Py_ssize_t *shape = memslice.shape * cdef Py_ssize_t *strides = memslice.strides # <<<<<<<<<<<<<< * * */ __pyx_t_2 = __pyx_v_memslice->strides; __pyx_v_strides = __pyx_t_2; /* "View.MemoryView":909 * * cdef int i, j * for i in range(ndim / 2): # <<<<<<<<<<<<<< * j = ndim - 1 - i * strides[i], strides[j] = strides[j], strides[i] */ __pyx_t_3 = (__pyx_v_ndim / 2); for (__pyx_t_1 = 0; __pyx_t_1 < __pyx_t_3; __pyx_t_1+=1) { __pyx_v_i = __pyx_t_1; /* "View.MemoryView":910 * cdef int i, j * for i in range(ndim / 2): * j = ndim - 1 - i # <<<<<<<<<<<<<< * strides[i], strides[j] = strides[j], strides[i] * shape[i], shape[j] = shape[j], shape[i] */ __pyx_v_j = ((__pyx_v_ndim - 1) - __pyx_v_i); /* "View.MemoryView":911 * for i in range(ndim / 2): * j = ndim - 1 - i * strides[i], strides[j] = strides[j], strides[i] # <<<<<<<<<<<<<< * shape[i], shape[j] = shape[j], shape[i] * */ __pyx_t_4 = (__pyx_v_strides[__pyx_v_j]); __pyx_t_5 = (__pyx_v_strides[__pyx_v_i]); (__pyx_v_strides[__pyx_v_i]) = __pyx_t_4; (__pyx_v_strides[__pyx_v_j]) = __pyx_t_5; /* "View.MemoryView":912 * j = ndim - 1 - i * strides[i], strides[j] = strides[j], strides[i] * shape[i], shape[j] = shape[j], shape[i] # <<<<<<<<<<<<<< * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: */ __pyx_t_5 = (__pyx_v_shape[__pyx_v_j]); __pyx_t_4 = (__pyx_v_shape[__pyx_v_i]); (__pyx_v_shape[__pyx_v_i]) = __pyx_t_5; (__pyx_v_shape[__pyx_v_j]) = __pyx_t_4; /* "View.MemoryView":914 * shape[i], shape[j] = shape[j], shape[i] * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: # <<<<<<<<<<<<<< * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") * */ __pyx_t_7 = (((__pyx_v_memslice->suboffsets[__pyx_v_i]) >= 0) != 0); if (!__pyx_t_7) { } else { __pyx_t_6 = __pyx_t_7; goto __pyx_L6_bool_binop_done; } __pyx_t_7 = (((__pyx_v_memslice->suboffsets[__pyx_v_j]) >= 0) != 0); __pyx_t_6 = __pyx_t_7; __pyx_L6_bool_binop_done:; if (__pyx_t_6) { /* "View.MemoryView":915 * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") # <<<<<<<<<<<<<< * * return 1 */ 
__pyx_t_8 = __pyx_memoryview_err(__pyx_builtin_ValueError, __pyx_k_Cannot_transpose_memoryview_with); if (unlikely(__pyx_t_8 == -1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 915; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "View.MemoryView":914 * shape[i], shape[j] = shape[j], shape[i] * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: # <<<<<<<<<<<<<< * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") * */ } } /* "View.MemoryView":917 * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") * * return 1 # <<<<<<<<<<<<<< * * */ __pyx_r = 1; goto __pyx_L0; /* "View.MemoryView":901 * * @cname('__pyx_memslice_transpose') * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: # <<<<<<<<<<<<<< * cdef int ndim = memslice.memview.view.ndim * */ /* function exit code */ __pyx_L1_error:; {
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
__Pyx_AddTraceback("View.MemoryView.transpose_memslice", __pyx_clineno, __pyx_lineno, __pyx_filename);
#ifdef WITH_THREAD
PyGILState_Release(__pyx_gilstate_save);
#endif
} __pyx_r = 0; __pyx_L0:; return __pyx_r; } /* "View.MemoryView":934 * cdef int (*to_dtype_func)(char *, object) except 0 * * def __dealloc__(self): # <<<<<<<<<<<<<< * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * */ /* Python wrapper */ static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self); /*proto*/ static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__", 0); /* "View.MemoryView":935 * * def __dealloc__(self): * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) # <<<<<<<<<<<<<< * * cdef convert_item_to_object(self, char *itemp): */ __PYX_XDEC_MEMVIEW((&__pyx_v_self->from_slice), 1); /* "View.MemoryView":934 * cdef int (*to_dtype_func)(char *, object) except 0 * * def __dealloc__(self): # <<<<<<<<<<<<<< * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":937 * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * if self.to_object_func != NULL: * return self.to_object_func(itemp) */ static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("convert_item_to_object", 0); /* "View.MemoryView":938 * * cdef convert_item_to_object(self, char *itemp): * if self.to_object_func != NULL: # <<<<<<<<<<<<<< * return self.to_object_func(itemp) * else: */ __pyx_t_1 = ((__pyx_v_self->to_object_func != NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":939 * cdef convert_item_to_object(self, char *itemp): * if self.to_object_func != NULL: * return self.to_object_func(itemp) # <<<<<<<<<<<<<< * else: * return memoryview.convert_item_to_object(self, itemp) */ __Pyx_XDECREF(__pyx_r);
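/* Editorial note: _memoryviewslice dispatches item conversion through the
 * two function pointers installed by memoryview_fromslice:
 *   object (*to_object_func)(char *)       raw item bytes -> Python object
 *   int (*to_dtype_func)(char *, object)   Python object  -> raw item bytes
 * When a pointer is NULL the call falls back to the base memoryview
 * implementation, which interprets the buffer's format string (via the
 * struct module), as in the else-branches of the two methods here. */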
__pyx_t_2 = __pyx_v_self->to_object_func(__pyx_v_itemp); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 939; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":938 * * cdef convert_item_to_object(self, char *itemp): * if self.to_object_func != NULL: # <<<<<<<<<<<<<< * return self.to_object_func(itemp) * else: */ } /* "View.MemoryView":941 * return self.to_object_func(itemp) * else: * return memoryview.convert_item_to_object(self, itemp) # <<<<<<<<<<<<<< * * cdef assign_item_from_object(self, char *itemp, object value): */ /*else*/ { __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_memoryview_convert_item_to_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 941; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; } /* "View.MemoryView":937 * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * if self.to_object_func != NULL: * return self.to_object_func(itemp) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView._memoryviewslice.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":943 * return memoryview.convert_item_to_object(self, itemp) * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * if self.to_dtype_func != NULL: * self.to_dtype_func(itemp, value) */ static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("assign_item_from_object", 0); /* "View.MemoryView":944 * * cdef assign_item_from_object(self, char *itemp, object value): * if self.to_dtype_func != NULL: # <<<<<<<<<<<<<< * self.to_dtype_func(itemp, value) * else: */ __pyx_t_1 = ((__pyx_v_self->to_dtype_func != NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":945 * cdef assign_item_from_object(self, char *itemp, object value): * if self.to_dtype_func != NULL: * self.to_dtype_func(itemp, value) # <<<<<<<<<<<<<< * else: * memoryview.assign_item_from_object(self, itemp, value) */ __pyx_t_2 = __pyx_v_self->to_dtype_func(__pyx_v_itemp, __pyx_v_value); if (unlikely(__pyx_t_2 == 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 945; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "View.MemoryView":944 * * cdef assign_item_from_object(self, char *itemp, object value): * if self.to_dtype_func != NULL: # <<<<<<<<<<<<<< * self.to_dtype_func(itemp, value) * else: */ goto __pyx_L3; } /* "View.MemoryView":947 * self.to_dtype_func(itemp, value) * else: * memoryview.assign_item_from_object(self, itemp, value) # <<<<<<<<<<<<<< * * property base: */ /*else*/ { __pyx_t_3 = __pyx_memoryview_assign_item_from_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 947; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } 
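/* Editorial note: the __Pyx_GOTREF / __Pyx_GIVEREF pairs threaded through
 * these methods are Cython's RefNanny reference-count instrumentation; in a
 * normal build (CYTHON_REFNANNY undefined) they expand to nothing and
 * __Pyx_XDECREF is plain Py_XDECREF, so the extra bookkeeping has no
 * runtime cost outside debugging builds. */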
__pyx_L3:; /* "View.MemoryView":943 * return memoryview.convert_item_to_object(self, itemp) * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * if self.to_dtype_func != NULL: * self.to_dtype_func(itemp, value) */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView._memoryviewslice.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":951 * property base: * @cname('__pyx_memoryviewslice__get__base') * def __get__(self): # <<<<<<<<<<<<<< * return self.from_object * */ /* Python wrapper */ static PyObject *__pyx_memoryviewslice__get__base(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryviewslice__get__base(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":952 * @cname('__pyx_memoryviewslice__get__base') * def __get__(self): * return self.from_object # <<<<<<<<<<<<<< * * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_self->from_object); __pyx_r = __pyx_v_self->from_object; goto __pyx_L0; /* "View.MemoryView":951 * property base: * @cname('__pyx_memoryviewslice__get__base') * def __get__(self): # <<<<<<<<<<<<<< * return self.from_object * */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":958 * * @cname('__pyx_memoryview_fromslice') * cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<< * int ndim, * object (*to_object_func)(char *), */ static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice __pyx_v_memviewslice, int __pyx_v_ndim, PyObject *(*__pyx_v_to_object_func)(char *), int (*__pyx_v_to_dtype_func)(char *, PyObject *), int __pyx_v_dtype_is_object) { struct __pyx_memoryviewslice_obj *__pyx_v_result = 0; Py_ssize_t __pyx_v_suboffset; PyObject *__pyx_v_length = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; __Pyx_TypeInfo *__pyx_t_4; Py_buffer __pyx_t_5; Py_ssize_t *__pyx_t_6; Py_ssize_t *__pyx_t_7; Py_ssize_t *__pyx_t_8; Py_ssize_t __pyx_t_9; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("memoryview_fromslice", 0); /* "View.MemoryView":966 * cdef _memoryviewslice result * * if <PyObject *> memviewslice.memview == Py_None: # <<<<<<<<<<<<<< * return None * */ __pyx_t_1 = ((((PyObject *)__pyx_v_memviewslice.memview) == Py_None) != 0); if (__pyx_t_1) { /* "View.MemoryView":967 * * if <PyObject *> memviewslice.memview == Py_None: * return None # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(Py_None); __pyx_r = Py_None; goto __pyx_L0; /* "View.MemoryView":966 * cdef _memoryviewslice result * * if <PyObject *> memviewslice.memview == 
Py_None: # <<<<<<<<<<<<<< * return None * */ } /* "View.MemoryView":972 * * * result = _memoryviewslice(None, 0, dtype_is_object) # <<<<<<<<<<<<<< * * result.from_slice = memviewslice */ __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 972; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 972; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); PyTuple_SET_ITEM(__pyx_t_3, 0, Py_None); __Pyx_INCREF(__pyx_int_0); __Pyx_GIVEREF(__pyx_int_0); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_0); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryviewslice_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 972; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":974 * result = _memoryviewslice(None, 0, dtype_is_object) * * result.from_slice = memviewslice # <<<<<<<<<<<<<< * __PYX_INC_MEMVIEW(&memviewslice, 1) * */ __pyx_v_result->from_slice = __pyx_v_memviewslice; /* "View.MemoryView":975 * * result.from_slice = memviewslice * __PYX_INC_MEMVIEW(&memviewslice, 1) # <<<<<<<<<<<<<< * * result.from_object = (<memoryview> memviewslice.memview).base */ __PYX_INC_MEMVIEW((&__pyx_v_memviewslice), 1); /* "View.MemoryView":977 * __PYX_INC_MEMVIEW(&memviewslice, 1) * * result.from_object = (<memoryview> memviewslice.memview).base # <<<<<<<<<<<<<< * result.typeinfo = memviewslice.memview.typeinfo * */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_memviewslice.memview), __pyx_n_s_base); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 977; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); __Pyx_GOTREF(__pyx_v_result->from_object); __Pyx_DECREF(__pyx_v_result->from_object); __pyx_v_result->from_object = __pyx_t_2; __pyx_t_2 = 0; /* "View.MemoryView":978 * * result.from_object = (<memoryview> memviewslice.memview).base * result.typeinfo = memviewslice.memview.typeinfo # <<<<<<<<<<<<<< * * result.view = memviewslice.memview.view */ __pyx_t_4 = __pyx_v_memviewslice.memview->typeinfo; __pyx_v_result->__pyx_base.typeinfo = __pyx_t_4; /* "View.MemoryView":980 * result.typeinfo = memviewslice.memview.typeinfo * * result.view = memviewslice.memview.view # <<<<<<<<<<<<<< * result.view.buf = <void *> memviewslice.data * result.view.ndim = ndim */ __pyx_t_5 = __pyx_v_memviewslice.memview->view; __pyx_v_result->__pyx_base.view = __pyx_t_5; /* "View.MemoryView":981 * * result.view = memviewslice.memview.view * result.view.buf = <void *> memviewslice.data # <<<<<<<<<<<<<< * result.view.ndim = ndim * (<__pyx_buffer *> &result.view).obj = Py_None */ __pyx_v_result->__pyx_base.view.buf = ((void *)__pyx_v_memviewslice.data); /* "View.MemoryView":982 * result.view = memviewslice.memview.view * result.view.buf = <void *> memviewslice.data * result.view.ndim = ndim # <<<<<<<<<<<<<< * (<__pyx_buffer *> &result.view).obj = Py_None * Py_INCREF(Py_None) */ __pyx_v_result->__pyx_base.view.ndim = __pyx_v_ndim; /* "View.MemoryView":983 * result.view.buf = <void *> memviewslice.data * result.view.ndim = 
ndim * (<__pyx_buffer *> &result.view).obj = Py_None # <<<<<<<<<<<<<< * Py_INCREF(Py_None) * */ ((Py_buffer *)(&__pyx_v_result->__pyx_base.view))->obj = Py_None; /* "View.MemoryView":984 * result.view.ndim = ndim * (<__pyx_buffer *> &result.view).obj = Py_None * Py_INCREF(Py_None) # <<<<<<<<<<<<<< * * result.flags = PyBUF_RECORDS */ Py_INCREF(Py_None); /* "View.MemoryView":986 * Py_INCREF(Py_None) * * result.flags = PyBUF_RECORDS # <<<<<<<<<<<<<< * * result.view.shape = <Py_ssize_t *> result.from_slice.shape */ __pyx_v_result->__pyx_base.flags = PyBUF_RECORDS; /* "View.MemoryView":988 * result.flags = PyBUF_RECORDS * * result.view.shape = <Py_ssize_t *> result.from_slice.shape # <<<<<<<<<<<<<< * result.view.strides = <Py_ssize_t *> result.from_slice.strides * */ __pyx_v_result->__pyx_base.view.shape = ((Py_ssize_t *)__pyx_v_result->from_slice.shape); /* "View.MemoryView":989 * * result.view.shape = <Py_ssize_t *> result.from_slice.shape * result.view.strides = <Py_ssize_t *> result.from_slice.strides # <<<<<<<<<<<<<< * * */ __pyx_v_result->__pyx_base.view.strides = ((Py_ssize_t *)__pyx_v_result->from_slice.strides); /* "View.MemoryView":992 * * * result.view.suboffsets = NULL # <<<<<<<<<<<<<< * for suboffset in result.from_slice.suboffsets[:ndim]: * if suboffset >= 0: */ __pyx_v_result->__pyx_base.view.suboffsets = NULL; /* "View.MemoryView":993 * * result.view.suboffsets = NULL * for suboffset in result.from_slice.suboffsets[:ndim]: # <<<<<<<<<<<<<< * if suboffset >= 0: * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets */ __pyx_t_7 = (__pyx_v_result->from_slice.suboffsets + __pyx_v_ndim); for (__pyx_t_8 = __pyx_v_result->from_slice.suboffsets; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) { __pyx_t_6 = __pyx_t_8; __pyx_v_suboffset = (__pyx_t_6[0]); /* "View.MemoryView":994 * result.view.suboffsets = NULL * for suboffset in result.from_slice.suboffsets[:ndim]: * if suboffset >= 0: # <<<<<<<<<<<<<< * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets * break */ __pyx_t_1 = ((__pyx_v_suboffset >= 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":995 * for suboffset in result.from_slice.suboffsets[:ndim]: * if suboffset >= 0: * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets # <<<<<<<<<<<<<< * break * */ __pyx_v_result->__pyx_base.view.suboffsets = ((Py_ssize_t *)__pyx_v_result->from_slice.suboffsets); /* "View.MemoryView":996 * if suboffset >= 0: * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets * break # <<<<<<<<<<<<<< * * result.view.len = result.view.itemsize */ goto __pyx_L5_break; /* "View.MemoryView":994 * result.view.suboffsets = NULL * for suboffset in result.from_slice.suboffsets[:ndim]: * if suboffset >= 0: # <<<<<<<<<<<<<< * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets * break */ } } __pyx_L5_break:; /* "View.MemoryView":998 * break * * result.view.len = result.view.itemsize # <<<<<<<<<<<<<< * for length in result.view.shape[:ndim]: * result.view.len *= length */ __pyx_t_9 = __pyx_v_result->__pyx_base.view.itemsize; __pyx_v_result->__pyx_base.view.len = __pyx_t_9; /* "View.MemoryView":999 * * result.view.len = result.view.itemsize * for length in result.view.shape[:ndim]: # <<<<<<<<<<<<<< * result.view.len *= length * */ __pyx_t_7 = (__pyx_v_result->__pyx_base.view.shape + __pyx_v_ndim); for (__pyx_t_8 = __pyx_v_result->__pyx_base.view.shape; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) { __pyx_t_6 = __pyx_t_8; __pyx_t_2 = PyInt_FromSsize_t((__pyx_t_6[0])); if (unlikely(!__pyx_t_2)) {__pyx_filename 
= __pyx_f[1]; __pyx_lineno = 999; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":1000 * result.view.len = result.view.itemsize * for length in result.view.shape[:ndim]: * result.view.len *= length # <<<<<<<<<<<<<< * * result.to_object_func = to_object_func */ __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_result->__pyx_base.view.len); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 1000; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyNumber_InPlaceMultiply(__pyx_t_2, __pyx_v_length); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 1000; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_9 = __Pyx_PyIndex_AsSsize_t(__pyx_t_3); if (unlikely((__pyx_t_9 == (Py_ssize_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 1000; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result->__pyx_base.view.len = __pyx_t_9; } /* "View.MemoryView":1002 * result.view.len *= length * * result.to_object_func = to_object_func # <<<<<<<<<<<<<< * result.to_dtype_func = to_dtype_func * */ __pyx_v_result->to_object_func = __pyx_v_to_object_func; /* "View.MemoryView":1003 * * result.to_object_func = to_object_func * result.to_dtype_func = to_dtype_func # <<<<<<<<<<<<<< * * return result */ __pyx_v_result->to_dtype_func = __pyx_v_to_dtype_func; /* "View.MemoryView":1005 * result.to_dtype_func = to_dtype_func * * return result # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_get_slice_from_memoryview') */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = ((PyObject *)__pyx_v_result); goto __pyx_L0; /* "View.MemoryView":958 * * @cname('__pyx_memoryview_fromslice') * cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<< * int ndim, * object (*to_object_func)(char *), */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview_fromslice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XDECREF(__pyx_v_length); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1008 * * @cname('__pyx_memoryview_get_slice_from_memoryview') * cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<< * __Pyx_memviewslice *mslice): * cdef _memoryviewslice obj */ static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_mslice) { struct __pyx_memoryviewslice_obj *__pyx_v_obj = 0; __Pyx_memviewslice *__pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("get_slice_from_memview", 0); /* "View.MemoryView":1011 * __Pyx_memviewslice *mslice): * cdef _memoryviewslice obj * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * obj = memview * return &obj.from_slice */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":1012 * cdef _memoryviewslice obj * if isinstance(memview, _memoryviewslice): * obj = memview # <<<<<<<<<<<<<< * 
return &obj.from_slice * else: */ if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 1012; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_3 = ((PyObject *)__pyx_v_memview); __Pyx_INCREF(__pyx_t_3); __pyx_v_obj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":1013 * if isinstance(memview, _memoryviewslice): * obj = memview * return &obj.from_slice # <<<<<<<<<<<<<< * else: * slice_copy(memview, mslice) */ __pyx_r = (&__pyx_v_obj->from_slice); goto __pyx_L0; /* "View.MemoryView":1011 * __Pyx_memviewslice *mslice): * cdef _memoryviewslice obj * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * obj = memview * return &obj.from_slice */ } /* "View.MemoryView":1015 * return &obj.from_slice * else: * slice_copy(memview, mslice) # <<<<<<<<<<<<<< * return mslice * */ /*else*/ { __pyx_memoryview_slice_copy(__pyx_v_memview, __pyx_v_mslice); /* "View.MemoryView":1016 * else: * slice_copy(memview, mslice) * return mslice # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_slice_copy') */ __pyx_r = __pyx_v_mslice; goto __pyx_L0; } /* "View.MemoryView":1008 * * @cname('__pyx_memoryview_get_slice_from_memoryview') * cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<< * __Pyx_memviewslice *mslice): * cdef _memoryviewslice obj */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_WriteUnraisable("View.MemoryView.get_slice_from_memview", __pyx_clineno, __pyx_lineno, __pyx_filename, 0, 0); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_obj); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1019 * * @cname('__pyx_memoryview_slice_copy') * cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst): # <<<<<<<<<<<<<< * cdef int dim * cdef (Py_ssize_t*) shape, strides, suboffsets */ static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_dst) { int __pyx_v_dim; Py_ssize_t *__pyx_v_shape; Py_ssize_t *__pyx_v_strides; Py_ssize_t *__pyx_v_suboffsets; __Pyx_RefNannyDeclarations Py_ssize_t *__pyx_t_1; int __pyx_t_2; int __pyx_t_3; Py_ssize_t __pyx_t_4; __Pyx_RefNannySetupContext("slice_copy", 0); /* "View.MemoryView":1023 * cdef (Py_ssize_t*) shape, strides, suboffsets * * shape = memview.view.shape # <<<<<<<<<<<<<< * strides = memview.view.strides * suboffsets = memview.view.suboffsets */ __pyx_t_1 = __pyx_v_memview->view.shape; __pyx_v_shape = __pyx_t_1; /* "View.MemoryView":1024 * * shape = memview.view.shape * strides = memview.view.strides # <<<<<<<<<<<<<< * suboffsets = memview.view.suboffsets * */ __pyx_t_1 = __pyx_v_memview->view.strides; __pyx_v_strides = __pyx_t_1; /* "View.MemoryView":1025 * shape = memview.view.shape * strides = memview.view.strides * suboffsets = memview.view.suboffsets # <<<<<<<<<<<<<< * * dst.memview = <__pyx_memoryview *> memview */ __pyx_t_1 = __pyx_v_memview->view.suboffsets; __pyx_v_suboffsets = __pyx_t_1; /* "View.MemoryView":1027 * suboffsets = memview.view.suboffsets * * dst.memview = <__pyx_memoryview *> memview # <<<<<<<<<<<<<< * dst.data = <char *> memview.view.buf * */ __pyx_v_dst->memview = ((struct __pyx_memoryview_obj *)__pyx_v_memview); /* "View.MemoryView":1028 * * dst.memview = <__pyx_memoryview *> memview * dst.data = <char *> memview.view.buf # <<<<<<<<<<<<<< * * for dim in range(memview.view.ndim): */ __pyx_v_dst->data = ((char 
*)__pyx_v_memview->view.buf); /* "View.MemoryView":1030 * dst.data = <char *> memview.view.buf * * for dim in range(memview.view.ndim): # <<<<<<<<<<<<<< * dst.shape[dim] = shape[dim] * dst.strides[dim] = strides[dim] */ __pyx_t_2 = __pyx_v_memview->view.ndim; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_dim = __pyx_t_3; /* "View.MemoryView":1031 * * for dim in range(memview.view.ndim): * dst.shape[dim] = shape[dim] # <<<<<<<<<<<<<< * dst.strides[dim] = strides[dim] * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 */ (__pyx_v_dst->shape[__pyx_v_dim]) = (__pyx_v_shape[__pyx_v_dim]); /* "View.MemoryView":1032 * for dim in range(memview.view.ndim): * dst.shape[dim] = shape[dim] * dst.strides[dim] = strides[dim] # <<<<<<<<<<<<<< * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 * */ (__pyx_v_dst->strides[__pyx_v_dim]) = (__pyx_v_strides[__pyx_v_dim]); /* "View.MemoryView":1033 * dst.shape[dim] = shape[dim] * dst.strides[dim] = strides[dim] * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_copy_object') */ if ((__pyx_v_suboffsets != 0)) { __pyx_t_4 = (__pyx_v_suboffsets[__pyx_v_dim]); } else { __pyx_t_4 = -1; } (__pyx_v_dst->suboffsets[__pyx_v_dim]) = __pyx_t_4; } /* "View.MemoryView":1019 * * @cname('__pyx_memoryview_slice_copy') * cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst): # <<<<<<<<<<<<<< * cdef int dim * cdef (Py_ssize_t*) shape, strides, suboffsets */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":1036 * * @cname('__pyx_memoryview_copy_object') * cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<< * "Create a new memoryview object" * cdef __Pyx_memviewslice memviewslice */ static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *__pyx_v_memview) { __Pyx_memviewslice __pyx_v_memviewslice; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("memoryview_copy", 0); /* "View.MemoryView":1039 * "Create a new memoryview object" * cdef __Pyx_memviewslice memviewslice * slice_copy(memview, &memviewslice) # <<<<<<<<<<<<<< * return memoryview_copy_from_slice(memview, &memviewslice) * */ __pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_memviewslice)); /* "View.MemoryView":1040 * cdef __Pyx_memviewslice memviewslice * slice_copy(memview, &memviewslice) * return memoryview_copy_from_slice(memview, &memviewslice) # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_copy_object_from_slice') */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __pyx_memoryview_copy_object_from_slice(__pyx_v_memview, (&__pyx_v_memviewslice)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 1040; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":1036 * * @cname('__pyx_memoryview_copy_object') * cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<< * "Create a new memoryview object" * cdef __Pyx_memviewslice memviewslice */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview_copy", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1043 * * @cname('__pyx_memoryview_copy_object_from_slice') * cdef 
memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<< * """ * Create a new memoryview object from a given memoryview object and slice. */ static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_memviewslice) { PyObject *(*__pyx_v_to_object_func)(char *); int (*__pyx_v_to_dtype_func)(char *, PyObject *); PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *(*__pyx_t_3)(char *); int (*__pyx_t_4)(char *, PyObject *); PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("memoryview_copy_from_slice", 0); /* "View.MemoryView":1050 * cdef int (*to_dtype_func)(char *, object) except 0 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * to_object_func = (<_memoryviewslice> memview).to_object_func * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":1051 * * if isinstance(memview, _memoryviewslice): * to_object_func = (<_memoryviewslice> memview).to_object_func # <<<<<<<<<<<<<< * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func * else: */ __pyx_t_3 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_object_func; __pyx_v_to_object_func = __pyx_t_3; /* "View.MemoryView":1052 * if isinstance(memview, _memoryviewslice): * to_object_func = (<_memoryviewslice> memview).to_object_func * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func # <<<<<<<<<<<<<< * else: * to_object_func = NULL */ __pyx_t_4 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_dtype_func; __pyx_v_to_dtype_func = __pyx_t_4; /* "View.MemoryView":1050 * cdef int (*to_dtype_func)(char *, object) except 0 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * to_object_func = (<_memoryviewslice> memview).to_object_func * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func */ goto __pyx_L3; } /* "View.MemoryView":1054 * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func * else: * to_object_func = NULL # <<<<<<<<<<<<<< * to_dtype_func = NULL * */ /*else*/ { __pyx_v_to_object_func = NULL; /* "View.MemoryView":1055 * else: * to_object_func = NULL * to_dtype_func = NULL # <<<<<<<<<<<<<< * * return memoryview_fromslice(memviewslice[0], memview.view.ndim, */ __pyx_v_to_dtype_func = NULL; } __pyx_L3:; /* "View.MemoryView":1057 * to_dtype_func = NULL * * return memoryview_fromslice(memviewslice[0], memview.view.ndim, # <<<<<<<<<<<<<< * to_object_func, to_dtype_func, * memview.dtype_is_object) */ __Pyx_XDECREF(__pyx_r); /* "View.MemoryView":1059 * return memoryview_fromslice(memviewslice[0], memview.view.ndim, * to_object_func, to_dtype_func, * memview.dtype_is_object) # <<<<<<<<<<<<<< * * */ __pyx_t_5 = __pyx_memoryview_fromslice((__pyx_v_memviewslice[0]), __pyx_v_memview->view.ndim, __pyx_v_to_object_func, __pyx_v_to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 1057; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "View.MemoryView":1043 * * @cname('__pyx_memoryview_copy_object_from_slice') * cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<< * """ * Create a new memoryview 
object from a given memoryview object and slice. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.memoryview_copy_from_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1065 * * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: # <<<<<<<<<<<<<< * if arg < 0: * return -arg */ static Py_ssize_t abs_py_ssize_t(Py_ssize_t __pyx_v_arg) { Py_ssize_t __pyx_r; int __pyx_t_1; /* "View.MemoryView":1066 * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: * if arg < 0: # <<<<<<<<<<<<<< * return -arg * else: */ __pyx_t_1 = ((__pyx_v_arg < 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":1067 * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: * if arg < 0: * return -arg # <<<<<<<<<<<<<< * else: * return arg */ __pyx_r = (-__pyx_v_arg); goto __pyx_L0; /* "View.MemoryView":1066 * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: * if arg < 0: # <<<<<<<<<<<<<< * return -arg * else: */ } /* "View.MemoryView":1069 * return -arg * else: * return arg # <<<<<<<<<<<<<< * * @cname('__pyx_get_best_slice_order') */ /*else*/ { __pyx_r = __pyx_v_arg; goto __pyx_L0; } /* "View.MemoryView":1065 * * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: # <<<<<<<<<<<<<< * if arg < 0: * return -arg */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1072 * * @cname('__pyx_get_best_slice_order') * cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) nogil: # <<<<<<<<<<<<<< * """ * Figure out the best memory access order for a given slice. */ static char __pyx_get_best_slice_order(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim) { int __pyx_v_i; Py_ssize_t __pyx_v_c_stride; Py_ssize_t __pyx_v_f_stride; char __pyx_r; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; /* "View.MemoryView":1077 * """ * cdef int i * cdef Py_ssize_t c_stride = 0 # <<<<<<<<<<<<<< * cdef Py_ssize_t f_stride = 0 * */ __pyx_v_c_stride = 0; /* "View.MemoryView":1078 * cdef int i * cdef Py_ssize_t c_stride = 0 * cdef Py_ssize_t f_stride = 0 # <<<<<<<<<<<<<< * * for i in range(ndim - 1, -1, -1): */ __pyx_v_f_stride = 0; /* "View.MemoryView":1080 * cdef Py_ssize_t f_stride = 0 * * for i in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< * if mslice.shape[i] > 1: * c_stride = mslice.strides[i] */ for (__pyx_t_1 = (__pyx_v_ndim - 1); __pyx_t_1 > -1; __pyx_t_1-=1) { __pyx_v_i = __pyx_t_1; /* "View.MemoryView":1081 * * for i in range(ndim - 1, -1, -1): * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< * c_stride = mslice.strides[i] * break */ __pyx_t_2 = (((__pyx_v_mslice->shape[__pyx_v_i]) > 1) != 0); if (__pyx_t_2) { /* "View.MemoryView":1082 * for i in range(ndim - 1, -1, -1): * if mslice.shape[i] > 1: * c_stride = mslice.strides[i] # <<<<<<<<<<<<<< * break * */ __pyx_v_c_stride = (__pyx_v_mslice->strides[__pyx_v_i]); /* "View.MemoryView":1083 * if mslice.shape[i] > 1: * c_stride = mslice.strides[i] * break # <<<<<<<<<<<<<< * * for i in range(ndim): */ goto __pyx_L4_break; /* "View.MemoryView":1081 * * for i in range(ndim - 1, -1, -1): * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< * c_stride = mslice.strides[i] * break */ } } __pyx_L4_break:; /* "View.MemoryView":1085 * break * * for i in range(ndim): # <<<<<<<<<<<<<< * if mslice.shape[i] > 1: * f_stride = mslice.strides[i] */ __pyx_t_1 = __pyx_v_ndim; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_1; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "View.MemoryView":1086 * * for i in range(ndim): 
* if mslice.shape[i] > 1: # <<<<<<<<<<<<<< * f_stride = mslice.strides[i] * break */ __pyx_t_2 = (((__pyx_v_mslice->shape[__pyx_v_i]) > 1) != 0); if (__pyx_t_2) { /* "View.MemoryView":1087 * for i in range(ndim): * if mslice.shape[i] > 1: * f_stride = mslice.strides[i] # <<<<<<<<<<<<<< * break * */ __pyx_v_f_stride = (__pyx_v_mslice->strides[__pyx_v_i]); /* "View.MemoryView":1088 * if mslice.shape[i] > 1: * f_stride = mslice.strides[i] * break # <<<<<<<<<<<<<< * * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): */ goto __pyx_L7_break; /* "View.MemoryView":1086 * * for i in range(ndim): * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< * f_stride = mslice.strides[i] * break */ } } __pyx_L7_break:; /* "View.MemoryView":1090 * break * * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): # <<<<<<<<<<<<<< * return 'C' * else: */ __pyx_t_2 = ((abs_py_ssize_t(__pyx_v_c_stride) <= abs_py_ssize_t(__pyx_v_f_stride)) != 0); if (__pyx_t_2) { /* "View.MemoryView":1091 * * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): * return 'C' # <<<<<<<<<<<<<< * else: * return 'F' */ __pyx_r = 'C'; goto __pyx_L0; /* "View.MemoryView":1090 * break * * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): # <<<<<<<<<<<<<< * return 'C' * else: */ } /* "View.MemoryView":1093 * return 'C' * else: * return 'F' # <<<<<<<<<<<<<< * * @cython.cdivision(True) */ /*else*/ { __pyx_r = 'F'; goto __pyx_L0; } /* "View.MemoryView":1072 * * @cname('__pyx_get_best_slice_order') * cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) nogil: # <<<<<<<<<<<<<< * """ * Figure out the best memory access order for a given slice. */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1096 * * @cython.cdivision(True) * cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides, # <<<<<<<<<<<<<< * char *dst_data, Py_ssize_t *dst_strides, * Py_ssize_t *src_shape, Py_ssize_t *dst_shape, */ static void _copy_strided_to_strided(char *__pyx_v_src_data, Py_ssize_t *__pyx_v_src_strides, char *__pyx_v_dst_data, Py_ssize_t *__pyx_v_dst_strides, Py_ssize_t *__pyx_v_src_shape, Py_ssize_t *__pyx_v_dst_shape, int __pyx_v_ndim, size_t __pyx_v_itemsize) { CYTHON_UNUSED Py_ssize_t __pyx_v_i; CYTHON_UNUSED Py_ssize_t __pyx_v_src_extent; Py_ssize_t __pyx_v_dst_extent; Py_ssize_t __pyx_v_src_stride; Py_ssize_t __pyx_v_dst_stride; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; Py_ssize_t __pyx_t_4; Py_ssize_t __pyx_t_5; /* "View.MemoryView":1103 * * cdef Py_ssize_t i * cdef Py_ssize_t src_extent = src_shape[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t dst_extent = dst_shape[0] * cdef Py_ssize_t src_stride = src_strides[0] */ __pyx_v_src_extent = (__pyx_v_src_shape[0]); /* "View.MemoryView":1104 * cdef Py_ssize_t i * cdef Py_ssize_t src_extent = src_shape[0] * cdef Py_ssize_t dst_extent = dst_shape[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t src_stride = src_strides[0] * cdef Py_ssize_t dst_stride = dst_strides[0] */ __pyx_v_dst_extent = (__pyx_v_dst_shape[0]); /* "View.MemoryView":1105 * cdef Py_ssize_t src_extent = src_shape[0] * cdef Py_ssize_t dst_extent = dst_shape[0] * cdef Py_ssize_t src_stride = src_strides[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t dst_stride = dst_strides[0] * */ __pyx_v_src_stride = (__pyx_v_src_strides[0]); /* "View.MemoryView":1106 * cdef Py_ssize_t dst_extent = dst_shape[0] * cdef Py_ssize_t src_stride = src_strides[0] * cdef Py_ssize_t dst_stride = dst_strides[0] # <<<<<<<<<<<<<< * * if ndim == 1: */ __pyx_v_dst_stride = (__pyx_v_dst_strides[0]); /* "View.MemoryView":1108 * cdef 
Py_ssize_t dst_stride = dst_strides[0] * * if ndim == 1: # <<<<<<<<<<<<<< * if (src_stride > 0 and dst_stride > 0 and * <size_t> src_stride == itemsize == <size_t> dst_stride): */ __pyx_t_1 = ((__pyx_v_ndim == 1) != 0); if (__pyx_t_1) { /* "View.MemoryView":1109 * * if ndim == 1: * if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<< * <size_t> src_stride == itemsize == <size_t> dst_stride): * memcpy(dst_data, src_data, itemsize * dst_extent) */ __pyx_t_2 = ((__pyx_v_src_stride > 0) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L5_bool_binop_done; } __pyx_t_2 = ((__pyx_v_dst_stride > 0) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L5_bool_binop_done; } /* "View.MemoryView":1110 * if ndim == 1: * if (src_stride > 0 and dst_stride > 0 and * <size_t> src_stride == itemsize == <size_t> dst_stride): # <<<<<<<<<<<<<< * memcpy(dst_data, src_data, itemsize * dst_extent) * else: */ __pyx_t_2 = (((size_t)__pyx_v_src_stride) == __pyx_v_itemsize); if (__pyx_t_2) { __pyx_t_2 = (__pyx_v_itemsize == ((size_t)__pyx_v_dst_stride)); } __pyx_t_3 = (__pyx_t_2 != 0); __pyx_t_1 = __pyx_t_3; __pyx_L5_bool_binop_done:; /* "View.MemoryView":1109 * * if ndim == 1: * if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<< * <size_t> src_stride == itemsize == <size_t> dst_stride): * memcpy(dst_data, src_data, itemsize * dst_extent) */ if (__pyx_t_1) { /* "View.MemoryView":1111 * if (src_stride > 0 and dst_stride > 0 and * <size_t> src_stride == itemsize == <size_t> dst_stride): * memcpy(dst_data, src_data, itemsize * dst_extent) # <<<<<<<<<<<<<< * else: * for i in range(dst_extent): */ memcpy(__pyx_v_dst_data, __pyx_v_src_data, (__pyx_v_itemsize * __pyx_v_dst_extent)); /* "View.MemoryView":1109 * * if ndim == 1: * if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<< * <size_t> src_stride == itemsize == <size_t> dst_stride): * memcpy(dst_data, src_data, itemsize * dst_extent) */ goto __pyx_L4; } /* "View.MemoryView":1113 * memcpy(dst_data, src_data, itemsize * dst_extent) * else: * for i in range(dst_extent): # <<<<<<<<<<<<<< * memcpy(dst_data, src_data, itemsize) * src_data += src_stride */ /*else*/ { __pyx_t_4 = __pyx_v_dst_extent; for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) { __pyx_v_i = __pyx_t_5; /* "View.MemoryView":1114 * else: * for i in range(dst_extent): * memcpy(dst_data, src_data, itemsize) # <<<<<<<<<<<<<< * src_data += src_stride * dst_data += dst_stride */ memcpy(__pyx_v_dst_data, __pyx_v_src_data, __pyx_v_itemsize); /* "View.MemoryView":1115 * for i in range(dst_extent): * memcpy(dst_data, src_data, itemsize) * src_data += src_stride # <<<<<<<<<<<<<< * dst_data += dst_stride * else: */ __pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride); /* "View.MemoryView":1116 * memcpy(dst_data, src_data, itemsize) * src_data += src_stride * dst_data += dst_stride # <<<<<<<<<<<<<< * else: * for i in range(dst_extent): */ __pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride); } } __pyx_L4:; /* "View.MemoryView":1108 * cdef Py_ssize_t dst_stride = dst_strides[0] * * if ndim == 1: # <<<<<<<<<<<<<< * if (src_stride > 0 and dst_stride > 0 and * <size_t> src_stride == itemsize == <size_t> dst_stride): */ goto __pyx_L3; } /* "View.MemoryView":1118 * dst_data += dst_stride * else: * for i in range(dst_extent): # <<<<<<<<<<<<<< * _copy_strided_to_strided(src_data, src_strides + 1, * dst_data, dst_strides + 1, */ /*else*/ { __pyx_t_4 = __pyx_v_dst_extent; for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) { __pyx_v_i = __pyx_t_5; /* 
"View.MemoryView":1119 * else: * for i in range(dst_extent): * _copy_strided_to_strided(src_data, src_strides + 1, # <<<<<<<<<<<<<< * dst_data, dst_strides + 1, * src_shape + 1, dst_shape + 1, */ _copy_strided_to_strided(__pyx_v_src_data, (__pyx_v_src_strides + 1), __pyx_v_dst_data, (__pyx_v_dst_strides + 1), (__pyx_v_src_shape + 1), (__pyx_v_dst_shape + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize); /* "View.MemoryView":1123 * src_shape + 1, dst_shape + 1, * ndim - 1, itemsize) * src_data += src_stride # <<<<<<<<<<<<<< * dst_data += dst_stride * */ __pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride); /* "View.MemoryView":1124 * ndim - 1, itemsize) * src_data += src_stride * dst_data += dst_stride # <<<<<<<<<<<<<< * * cdef void copy_strided_to_strided(__Pyx_memviewslice *src, */ __pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride); } } __pyx_L3:; /* "View.MemoryView":1096 * * @cython.cdivision(True) * cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides, # <<<<<<<<<<<<<< * char *dst_data, Py_ssize_t *dst_strides, * Py_ssize_t *src_shape, Py_ssize_t *dst_shape, */ /* function exit code */ } /* "View.MemoryView":1126 * dst_data += dst_stride * * cdef void copy_strided_to_strided(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< * __Pyx_memviewslice *dst, * int ndim, size_t itemsize) nogil: */ static void copy_strided_to_strided(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize) { /* "View.MemoryView":1129 * __Pyx_memviewslice *dst, * int ndim, size_t itemsize) nogil: * _copy_strided_to_strided(src.data, src.strides, dst.data, dst.strides, # <<<<<<<<<<<<<< * src.shape, dst.shape, ndim, itemsize) * */ _copy_strided_to_strided(__pyx_v_src->data, __pyx_v_src->strides, __pyx_v_dst->data, __pyx_v_dst->strides, __pyx_v_src->shape, __pyx_v_dst->shape, __pyx_v_ndim, __pyx_v_itemsize); /* "View.MemoryView":1126 * dst_data += dst_stride * * cdef void copy_strided_to_strided(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< * __Pyx_memviewslice *dst, * int ndim, size_t itemsize) nogil: */ /* function exit code */ } /* "View.MemoryView":1133 * * @cname('__pyx_memoryview_slice_get_size') * cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: # <<<<<<<<<<<<<< * "Return the size of the memory occupied by the slice in number of bytes" * cdef int i */ static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *__pyx_v_src, int __pyx_v_ndim) { int __pyx_v_i; Py_ssize_t __pyx_v_size; Py_ssize_t __pyx_r; Py_ssize_t __pyx_t_1; int __pyx_t_2; int __pyx_t_3; /* "View.MemoryView":1136 * "Return the size of the memory occupied by the slice in number of bytes" * cdef int i * cdef Py_ssize_t size = src.memview.view.itemsize # <<<<<<<<<<<<<< * * for i in range(ndim): */ __pyx_t_1 = __pyx_v_src->memview->view.itemsize; __pyx_v_size = __pyx_t_1; /* "View.MemoryView":1138 * cdef Py_ssize_t size = src.memview.view.itemsize * * for i in range(ndim): # <<<<<<<<<<<<<< * size *= src.shape[i] * */ __pyx_t_2 = __pyx_v_ndim; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "View.MemoryView":1139 * * for i in range(ndim): * size *= src.shape[i] # <<<<<<<<<<<<<< * * return size */ __pyx_v_size = (__pyx_v_size * (__pyx_v_src->shape[__pyx_v_i])); } /* "View.MemoryView":1141 * size *= src.shape[i] * * return size # <<<<<<<<<<<<<< * * @cname('__pyx_fill_contig_strides_array') */ __pyx_r = __pyx_v_size; goto __pyx_L0; /* "View.MemoryView":1133 * * @cname('__pyx_memoryview_slice_get_size') * cdef 
Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: # <<<<<<<<<<<<<< * "Return the size of the memory occupied by the slice in number of bytes" * cdef int i */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1144 * * @cname('__pyx_fill_contig_strides_array') * cdef Py_ssize_t fill_contig_strides_array( # <<<<<<<<<<<<<< * Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride, * int ndim, char order) nogil: */ static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, Py_ssize_t __pyx_v_stride, int __pyx_v_ndim, char __pyx_v_order) { int __pyx_v_idx; Py_ssize_t __pyx_r; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; /* "View.MemoryView":1153 * cdef int idx * * if order == 'F': # <<<<<<<<<<<<<< * for idx in range(ndim): * strides[idx] = stride */ __pyx_t_1 = ((__pyx_v_order == 'F') != 0); if (__pyx_t_1) { /* "View.MemoryView":1154 * * if order == 'F': * for idx in range(ndim): # <<<<<<<<<<<<<< * strides[idx] = stride * stride = stride * shape[idx] */ __pyx_t_2 = __pyx_v_ndim; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_idx = __pyx_t_3; /* "View.MemoryView":1155 * if order == 'F': * for idx in range(ndim): * strides[idx] = stride # <<<<<<<<<<<<<< * stride = stride * shape[idx] * else: */ (__pyx_v_strides[__pyx_v_idx]) = __pyx_v_stride; /* "View.MemoryView":1156 * for idx in range(ndim): * strides[idx] = stride * stride = stride * shape[idx] # <<<<<<<<<<<<<< * else: * for idx in range(ndim - 1, -1, -1): */ __pyx_v_stride = (__pyx_v_stride * (__pyx_v_shape[__pyx_v_idx])); } /* "View.MemoryView":1153 * cdef int idx * * if order == 'F': # <<<<<<<<<<<<<< * for idx in range(ndim): * strides[idx] = stride */ goto __pyx_L3; } /* "View.MemoryView":1158 * stride = stride * shape[idx] * else: * for idx in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< * strides[idx] = stride * stride = stride * shape[idx] */ /*else*/ { for (__pyx_t_2 = (__pyx_v_ndim - 1); __pyx_t_2 > -1; __pyx_t_2-=1) { __pyx_v_idx = __pyx_t_2; /* "View.MemoryView":1159 * else: * for idx in range(ndim - 1, -1, -1): * strides[idx] = stride # <<<<<<<<<<<<<< * stride = stride * shape[idx] * */ (__pyx_v_strides[__pyx_v_idx]) = __pyx_v_stride; /* "View.MemoryView":1160 * for idx in range(ndim - 1, -1, -1): * strides[idx] = stride * stride = stride * shape[idx] # <<<<<<<<<<<<<< * * return stride */ __pyx_v_stride = (__pyx_v_stride * (__pyx_v_shape[__pyx_v_idx])); } } __pyx_L3:; /* "View.MemoryView":1162 * stride = stride * shape[idx] * * return stride # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_copy_data_to_temp') */ __pyx_r = __pyx_v_stride; goto __pyx_L0; /* "View.MemoryView":1144 * * @cname('__pyx_fill_contig_strides_array') * cdef Py_ssize_t fill_contig_strides_array( # <<<<<<<<<<<<<< * Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride, * int ndim, char order) nogil: */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1165 * * @cname('__pyx_memoryview_copy_data_to_temp') * cdef void *copy_data_to_temp(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< * __Pyx_memviewslice *tmpslice, * char order, */ static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_tmpslice, char __pyx_v_order, int __pyx_v_ndim) { int __pyx_v_i; void *__pyx_v_result; size_t __pyx_v_itemsize; size_t __pyx_v_size; void *__pyx_r; Py_ssize_t __pyx_t_1; int __pyx_t_2; int __pyx_t_3; struct __pyx_memoryview_obj *__pyx_t_4; int __pyx_t_5; int __pyx_lineno = 0; const char *__pyx_filename 
= NULL; int __pyx_clineno = 0; /* "View.MemoryView":1176 * cdef void *result * * cdef size_t itemsize = src.memview.view.itemsize # <<<<<<<<<<<<<< * cdef size_t size = slice_get_size(src, ndim) * */ __pyx_t_1 = __pyx_v_src->memview->view.itemsize; __pyx_v_itemsize = __pyx_t_1; /* "View.MemoryView":1177 * * cdef size_t itemsize = src.memview.view.itemsize * cdef size_t size = slice_get_size(src, ndim) # <<<<<<<<<<<<<< * * result = malloc(size) */ __pyx_v_size = __pyx_memoryview_slice_get_size(__pyx_v_src, __pyx_v_ndim); /* "View.MemoryView":1179 * cdef size_t size = slice_get_size(src, ndim) * * result = malloc(size) # <<<<<<<<<<<<<< * if not result: * _err(MemoryError, NULL) */ __pyx_v_result = malloc(__pyx_v_size); /* "View.MemoryView":1180 * * result = malloc(size) * if not result: # <<<<<<<<<<<<<< * _err(MemoryError, NULL) * */ __pyx_t_2 = ((!(__pyx_v_result != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":1181 * result = malloc(size) * if not result: * _err(MemoryError, NULL) # <<<<<<<<<<<<<< * * */ __pyx_t_3 = __pyx_memoryview_err(__pyx_builtin_MemoryError, NULL); if (unlikely(__pyx_t_3 == -1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 1181; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "View.MemoryView":1180 * * result = malloc(size) * if not result: # <<<<<<<<<<<<<< * _err(MemoryError, NULL) * */ } /* "View.MemoryView":1184 * * * tmpslice.data = <char *> result # <<<<<<<<<<<<<< * tmpslice.memview = src.memview * for i in range(ndim): */ __pyx_v_tmpslice->data = ((char *)__pyx_v_result); /* "View.MemoryView":1185 * * tmpslice.data = <char *> result * tmpslice.memview = src.memview # <<<<<<<<<<<<<< * for i in range(ndim): * tmpslice.shape[i] = src.shape[i] */ __pyx_t_4 = __pyx_v_src->memview; __pyx_v_tmpslice->memview = __pyx_t_4; /* "View.MemoryView":1186 * tmpslice.data = <char *> result * tmpslice.memview = src.memview * for i in range(ndim): # <<<<<<<<<<<<<< * tmpslice.shape[i] = src.shape[i] * tmpslice.suboffsets[i] = -1 */ __pyx_t_3 = __pyx_v_ndim; for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_3; __pyx_t_5+=1) { __pyx_v_i = __pyx_t_5; /* "View.MemoryView":1187 * tmpslice.memview = src.memview * for i in range(ndim): * tmpslice.shape[i] = src.shape[i] # <<<<<<<<<<<<<< * tmpslice.suboffsets[i] = -1 * */ (__pyx_v_tmpslice->shape[__pyx_v_i]) = (__pyx_v_src->shape[__pyx_v_i]); /* "View.MemoryView":1188 * for i in range(ndim): * tmpslice.shape[i] = src.shape[i] * tmpslice.suboffsets[i] = -1 # <<<<<<<<<<<<<< * * fill_contig_strides_array(&tmpslice.shape[0], &tmpslice.strides[0], itemsize, */ (__pyx_v_tmpslice->suboffsets[__pyx_v_i]) = -1; } /* "View.MemoryView":1190 * tmpslice.suboffsets[i] = -1 * * fill_contig_strides_array(&tmpslice.shape[0], &tmpslice.strides[0], itemsize, # <<<<<<<<<<<<<< * ndim, order) * */ __pyx_fill_contig_strides_array((&(__pyx_v_tmpslice->shape[0])), (&(__pyx_v_tmpslice->strides[0])), __pyx_v_itemsize, __pyx_v_ndim, __pyx_v_order); /* "View.MemoryView":1194 * * * for i in range(ndim): # <<<<<<<<<<<<<< * if tmpslice.shape[i] == 1: * tmpslice.strides[i] = 0 */ __pyx_t_3 = __pyx_v_ndim; for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_3; __pyx_t_5+=1) { __pyx_v_i = __pyx_t_5; /* "View.MemoryView":1195 * * for i in range(ndim): * if tmpslice.shape[i] == 1: # <<<<<<<<<<<<<< * tmpslice.strides[i] = 0 * */ __pyx_t_2 = (((__pyx_v_tmpslice->shape[__pyx_v_i]) == 1) != 0); if (__pyx_t_2) { /* "View.MemoryView":1196 * for i in range(ndim): * if tmpslice.shape[i] == 1: * tmpslice.strides[i] = 0 # <<<<<<<<<<<<<< * * if slice_is_contig(src, order, ndim): */ 
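/* The fill_contig_strides_array() call above lays the temporary buffer out
 * contiguously. Illustrative example (hypothetical shape): for shape {3, 4},
 * itemsize 8 and order 'C' it walks the dimensions from last to first,
 * producing strides {32, 8} and returning 96, the total byte extent. The
 * assignment below then zeroes the stride of any extent-1 dimension, so
 * broadcast axes never advance the data pointer, matching how
 * memoryview_copy_contents handles broadcast source dimensions. */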
(__pyx_v_tmpslice->strides[__pyx_v_i]) = 0; /* "View.MemoryView":1195 * * for i in range(ndim): * if tmpslice.shape[i] == 1: # <<<<<<<<<<<<<< * tmpslice.strides[i] = 0 * */ } } /* "View.MemoryView":1198 * tmpslice.strides[i] = 0 * * if slice_is_contig(src, order, ndim): # <<<<<<<<<<<<<< * memcpy(result, src.data, size) * else: */ __pyx_t_2 = (__pyx_memviewslice_is_contig(__pyx_v_src, __pyx_v_order, __pyx_v_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1199 * * if slice_is_contig(src, order, ndim): * memcpy(result, src.data, size) # <<<<<<<<<<<<<< * else: * copy_strided_to_strided(src, tmpslice, ndim, itemsize) */ memcpy(__pyx_v_result, __pyx_v_src->data, __pyx_v_size); /* "View.MemoryView":1198 * tmpslice.strides[i] = 0 * * if slice_is_contig(src, order, ndim): # <<<<<<<<<<<<<< * memcpy(result, src.data, size) * else: */ goto __pyx_L9; } /* "View.MemoryView":1201 * memcpy(result, src.data, size) * else: * copy_strided_to_strided(src, tmpslice, ndim, itemsize) # <<<<<<<<<<<<<< * * return result */ /*else*/ { copy_strided_to_strided(__pyx_v_src, __pyx_v_tmpslice, __pyx_v_ndim, __pyx_v_itemsize); } __pyx_L9:; /* "View.MemoryView":1203 * copy_strided_to_strided(src, tmpslice, ndim, itemsize) * * return result # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_result; goto __pyx_L0; /* "View.MemoryView":1165 * * @cname('__pyx_memoryview_copy_data_to_temp') * cdef void *copy_data_to_temp(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< * __Pyx_memviewslice *tmpslice, * char order, */ /* function exit code */ __pyx_L1_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_AddTraceback("View.MemoryView.copy_data_to_temp", __pyx_clineno, __pyx_lineno, __pyx_filename); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_r = NULL; __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1208 * * @cname('__pyx_memoryview_err_extents') * cdef int _err_extents(int i, Py_ssize_t extent1, # <<<<<<<<<<<<<< * Py_ssize_t extent2) except -1 with gil: * raise ValueError("got differing extents in dimension %d (got %d and %d)" % */ static int __pyx_memoryview_err_extents(int __pyx_v_i, Py_ssize_t __pyx_v_extent1, Py_ssize_t __pyx_v_extent2) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_RefNannySetupContext("_err_extents", 0); /* "View.MemoryView":1211 * Py_ssize_t extent2) except -1 with gil: * raise ValueError("got differing extents in dimension %d (got %d and %d)" % * (i, extent1, extent2)) # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_err_dim') */ __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_i); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 1211; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_extent1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 1211; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_extent2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 1211; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyTuple_New(3); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 1211; __pyx_clineno = __LINE__; goto __pyx_L1_error;} 
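/* The 3-tuple packed below feeds the "got differing extents in dimension %d
 * (got %d and %d)" format string. Illustrative example (hypothetical
 * shapes): copying a (3, 5) source into a (3, 4) destination reaches
 * _err_extents(1, 4, 5) and raises
 * ValueError("got differing extents in dimension 1 (got 4 and 5)");
 * a source extent of exactly 1 is broadcast instead of rejected. */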
__Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_4, 2, __pyx_t_3); __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_t_3 = 0; /* "View.MemoryView":1210 * cdef int _err_extents(int i, Py_ssize_t extent1, * Py_ssize_t extent2) except -1 with gil: * raise ValueError("got differing extents in dimension %d (got %d and %d)" % # <<<<<<<<<<<<<< * (i, extent1, extent2)) * */ __pyx_t_3 = __Pyx_PyString_Format(__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_t_4); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 1210; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 1210; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_4, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 1210; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 1210; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "View.MemoryView":1208 * * @cname('__pyx_memoryview_err_extents') * cdef int _err_extents(int i, Py_ssize_t extent1, # <<<<<<<<<<<<<< * Py_ssize_t extent2) except -1 with gil: * raise ValueError("got differing extents in dimension %d (got %d and %d)" % */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("View.MemoryView._err_extents", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __Pyx_RefNannyFinishContext(); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif return __pyx_r; } /* "View.MemoryView":1214 * * @cname('__pyx_memoryview_err_dim') * cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: # <<<<<<<<<<<<<< * raise error(msg.decode('ascii') % dim) * */ static int __pyx_memoryview_err_dim(PyObject *__pyx_v_error, char *__pyx_v_msg, int __pyx_v_dim) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_RefNannySetupContext("_err_dim", 0); __Pyx_INCREF(__pyx_v_error); /* "View.MemoryView":1215 * @cname('__pyx_memoryview_err_dim') * cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: * raise error(msg.decode('ascii') % dim) # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_err') */ __pyx_t_2 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 1215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_dim); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 1215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = 
PyUnicode_Format(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 1215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_INCREF(__pyx_v_error); __pyx_t_3 = __pyx_v_error; __pyx_t_2 = NULL; if (CYTHON_COMPILING_IN_CPYTHON && unlikely(PyMethod_Check(__pyx_t_3))) { __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3); if (likely(__pyx_t_2)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_2); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_3, function); } } if (!__pyx_t_2) { __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 1215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_GOTREF(__pyx_t_1); } else { __pyx_t_5 = PyTuple_New(1+1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 1215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_2); __pyx_t_2 = NULL; __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 0+1, __pyx_t_4); __pyx_t_4 = 0; __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_5, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 1215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 1215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "View.MemoryView":1214 * * @cname('__pyx_memoryview_err_dim') * cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: # <<<<<<<<<<<<<< * raise error(msg.decode('ascii') % dim) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView._err_dim", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __Pyx_XDECREF(__pyx_v_error); __Pyx_RefNannyFinishContext(); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif return __pyx_r; } /* "View.MemoryView":1218 * * @cname('__pyx_memoryview_err') * cdef int _err(object error, char *msg) except -1 with gil: # <<<<<<<<<<<<<< * if msg != NULL: * raise error(msg.decode('ascii')) */ static int __pyx_memoryview_err(PyObject *__pyx_v_error, char *__pyx_v_msg) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_RefNannySetupContext("_err", 0); __Pyx_INCREF(__pyx_v_error); /* "View.MemoryView":1219 * @cname('__pyx_memoryview_err') * cdef int _err(object error, char *msg) except -1 with gil: * if msg != NULL: # <<<<<<<<<<<<<< * raise error(msg.decode('ascii')) * else: */ __pyx_t_1 = ((__pyx_v_msg != NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":1220 * cdef int _err(object error, char *msg) except -1 with gil: * if msg != NULL: * raise error(msg.decode('ascii')) # <<<<<<<<<<<<<< * else: * raise error */ __pyx_t_3 = __Pyx_decode_c_string(__pyx_v_msg, 0, 
strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 1220; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(__pyx_v_error); __pyx_t_4 = __pyx_v_error; __pyx_t_5 = NULL; if (CYTHON_COMPILING_IN_CPYTHON && unlikely(PyMethod_Check(__pyx_t_4))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_4); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_4, function); } } if (!__pyx_t_5) { __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_3); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 1220; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_GOTREF(__pyx_t_2); } else { __pyx_t_6 = PyTuple_New(1+1); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 1220; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_6); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_5); __pyx_t_5 = NULL; __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_6, 0+1, __pyx_t_3); __pyx_t_3 = 0; __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_6, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 1220; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 1220; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "View.MemoryView":1219 * @cname('__pyx_memoryview_err') * cdef int _err(object error, char *msg) except -1 with gil: * if msg != NULL: # <<<<<<<<<<<<<< * raise error(msg.decode('ascii')) * else: */ } /* "View.MemoryView":1222 * raise error(msg.decode('ascii')) * else: * raise error # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_copy_contents') */ /*else*/ { __Pyx_Raise(__pyx_v_error, 0, 0, 0); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 1222; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } /* "View.MemoryView":1218 * * @cname('__pyx_memoryview_err') * cdef int _err(object error, char *msg) except -1 with gil: # <<<<<<<<<<<<<< * if msg != NULL: * raise error(msg.decode('ascii')) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("View.MemoryView._err", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __Pyx_XDECREF(__pyx_v_error); __Pyx_RefNannyFinishContext(); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif return __pyx_r; } /* "View.MemoryView":1225 * * @cname('__pyx_memoryview_copy_contents') * cdef int memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<< * __Pyx_memviewslice dst, * int src_ndim, int dst_ndim, */ static int __pyx_memoryview_copy_contents(__Pyx_memviewslice __pyx_v_src, __Pyx_memviewslice __pyx_v_dst, int __pyx_v_src_ndim, int __pyx_v_dst_ndim, int __pyx_v_dtype_is_object) { void *__pyx_v_tmpdata; size_t __pyx_v_itemsize; int __pyx_v_i; char __pyx_v_order; int __pyx_v_broadcasting; int __pyx_v_direct_copy; __Pyx_memviewslice __pyx_v_tmp; int __pyx_v_ndim; int __pyx_r; Py_ssize_t __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; int __pyx_t_5; void *__pyx_t_6; int __pyx_t_7; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno 
= 0; /* "View.MemoryView":1233 * Check for overlapping memory and verify the shapes. * """ * cdef void *tmpdata = NULL # <<<<<<<<<<<<<< * cdef size_t itemsize = src.memview.view.itemsize * cdef int i */ __pyx_v_tmpdata = NULL; /* "View.MemoryView":1234 * """ * cdef void *tmpdata = NULL * cdef size_t itemsize = src.memview.view.itemsize # <<<<<<<<<<<<<< * cdef int i * cdef char order = get_best_order(&src, src_ndim) */ __pyx_t_1 = __pyx_v_src.memview->view.itemsize; __pyx_v_itemsize = __pyx_t_1; /* "View.MemoryView":1236 * cdef size_t itemsize = src.memview.view.itemsize * cdef int i * cdef char order = get_best_order(&src, src_ndim) # <<<<<<<<<<<<<< * cdef bint broadcasting = False * cdef bint direct_copy = False */ __pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_src), __pyx_v_src_ndim); /* "View.MemoryView":1237 * cdef int i * cdef char order = get_best_order(&src, src_ndim) * cdef bint broadcasting = False # <<<<<<<<<<<<<< * cdef bint direct_copy = False * cdef __Pyx_memviewslice tmp */ __pyx_v_broadcasting = 0; /* "View.MemoryView":1238 * cdef char order = get_best_order(&src, src_ndim) * cdef bint broadcasting = False * cdef bint direct_copy = False # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice tmp * */ __pyx_v_direct_copy = 0; /* "View.MemoryView":1241 * cdef __Pyx_memviewslice tmp * * if src_ndim < dst_ndim: # <<<<<<<<<<<<<< * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: */ __pyx_t_2 = ((__pyx_v_src_ndim < __pyx_v_dst_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1242 * * if src_ndim < dst_ndim: * broadcast_leading(&src, src_ndim, dst_ndim) # <<<<<<<<<<<<<< * elif dst_ndim < src_ndim: * broadcast_leading(&dst, dst_ndim, src_ndim) */ __pyx_memoryview_broadcast_leading((&__pyx_v_src), __pyx_v_src_ndim, __pyx_v_dst_ndim); /* "View.MemoryView":1241 * cdef __Pyx_memviewslice tmp * * if src_ndim < dst_ndim: # <<<<<<<<<<<<<< * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: */ goto __pyx_L3; } /* "View.MemoryView":1243 * if src_ndim < dst_ndim: * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: # <<<<<<<<<<<<<< * broadcast_leading(&dst, dst_ndim, src_ndim) * */ __pyx_t_2 = ((__pyx_v_dst_ndim < __pyx_v_src_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1244 * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: * broadcast_leading(&dst, dst_ndim, src_ndim) # <<<<<<<<<<<<<< * * cdef int ndim = max(src_ndim, dst_ndim) */ __pyx_memoryview_broadcast_leading((&__pyx_v_dst), __pyx_v_dst_ndim, __pyx_v_src_ndim); /* "View.MemoryView":1243 * if src_ndim < dst_ndim: * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: # <<<<<<<<<<<<<< * broadcast_leading(&dst, dst_ndim, src_ndim) * */ } __pyx_L3:; /* "View.MemoryView":1246 * broadcast_leading(&dst, dst_ndim, src_ndim) * * cdef int ndim = max(src_ndim, dst_ndim) # <<<<<<<<<<<<<< * * for i in range(ndim): */ __pyx_t_3 = __pyx_v_dst_ndim; __pyx_t_4 = __pyx_v_src_ndim; if (((__pyx_t_3 > __pyx_t_4) != 0)) { __pyx_t_5 = __pyx_t_3; } else { __pyx_t_5 = __pyx_t_4; } __pyx_v_ndim = __pyx_t_5; /* "View.MemoryView":1248 * cdef int ndim = max(src_ndim, dst_ndim) * * for i in range(ndim): # <<<<<<<<<<<<<< * if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: */ __pyx_t_5 = __pyx_v_ndim; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_5; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "View.MemoryView":1249 * * for i in range(ndim): * if src.shape[i] != dst.shape[i]: # <<<<<<<<<<<<<< * if src.shape[i] == 1: * broadcasting = True */ __pyx_t_2 = 
(((__pyx_v_src.shape[__pyx_v_i]) != (__pyx_v_dst.shape[__pyx_v_i])) != 0); if (__pyx_t_2) { /* "View.MemoryView":1250 * for i in range(ndim): * if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: # <<<<<<<<<<<<<< * broadcasting = True * src.strides[i] = 0 */ __pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) == 1) != 0); if (__pyx_t_2) { /* "View.MemoryView":1251 * if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: * broadcasting = True # <<<<<<<<<<<<<< * src.strides[i] = 0 * else: */ __pyx_v_broadcasting = 1; /* "View.MemoryView":1252 * if src.shape[i] == 1: * broadcasting = True * src.strides[i] = 0 # <<<<<<<<<<<<<< * else: * _err_extents(i, dst.shape[i], src.shape[i]) */ (__pyx_v_src.strides[__pyx_v_i]) = 0; /* "View.MemoryView":1250 * for i in range(ndim): * if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: # <<<<<<<<<<<<<< * broadcasting = True * src.strides[i] = 0 */ goto __pyx_L7; } /* "View.MemoryView":1254 * src.strides[i] = 0 * else: * _err_extents(i, dst.shape[i], src.shape[i]) # <<<<<<<<<<<<<< * * if src.suboffsets[i] >= 0: */ /*else*/ { __pyx_t_4 = __pyx_memoryview_err_extents(__pyx_v_i, (__pyx_v_dst.shape[__pyx_v_i]), (__pyx_v_src.shape[__pyx_v_i])); if (unlikely(__pyx_t_4 == -1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 1254; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_L7:; /* "View.MemoryView":1249 * * for i in range(ndim): * if src.shape[i] != dst.shape[i]: # <<<<<<<<<<<<<< * if src.shape[i] == 1: * broadcasting = True */ } /* "View.MemoryView":1256 * _err_extents(i, dst.shape[i], src.shape[i]) * * if src.suboffsets[i] >= 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, "Dimension %d is not direct", i) * */ __pyx_t_2 = (((__pyx_v_src.suboffsets[__pyx_v_i]) >= 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":1257 * * if src.suboffsets[i] >= 0: * _err_dim(ValueError, "Dimension %d is not direct", i) # <<<<<<<<<<<<<< * * if slices_overlap(&src, &dst, ndim, itemsize): */ __pyx_t_4 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, __pyx_k_Dimension_d_is_not_direct, __pyx_v_i); if (unlikely(__pyx_t_4 == -1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 1257; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "View.MemoryView":1256 * _err_extents(i, dst.shape[i], src.shape[i]) * * if src.suboffsets[i] >= 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, "Dimension %d is not direct", i) * */ } } /* "View.MemoryView":1259 * _err_dim(ValueError, "Dimension %d is not direct", i) * * if slices_overlap(&src, &dst, ndim, itemsize): # <<<<<<<<<<<<<< * * if not slice_is_contig(&src, order, ndim): */ __pyx_t_2 = (__pyx_slices_overlap((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize) != 0); if (__pyx_t_2) { /* "View.MemoryView":1261 * if slices_overlap(&src, &dst, ndim, itemsize): * * if not slice_is_contig(&src, order, ndim): # <<<<<<<<<<<<<< * order = get_best_order(&dst, ndim) * */ __pyx_t_2 = ((!(__pyx_memviewslice_is_contig((&__pyx_v_src), __pyx_v_order, __pyx_v_ndim) != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":1262 * * if not slice_is_contig(&src, order, ndim): * order = get_best_order(&dst, ndim) # <<<<<<<<<<<<<< * * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) */ __pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim); /* "View.MemoryView":1261 * if slices_overlap(&src, &dst, ndim, itemsize): * * if not slice_is_contig(&src, order, ndim): # <<<<<<<<<<<<<< * order = get_best_order(&dst, ndim) * */ } /* "View.MemoryView":1264 * order = get_best_order(&dst, ndim) * * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) # 
<<<<<<<<<<<<<< * src = tmp * */ __pyx_t_6 = __pyx_memoryview_copy_data_to_temp((&__pyx_v_src), (&__pyx_v_tmp), __pyx_v_order, __pyx_v_ndim); if (unlikely(__pyx_t_6 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 1264; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_tmpdata = __pyx_t_6; /* "View.MemoryView":1265 * * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) * src = tmp # <<<<<<<<<<<<<< * * if not broadcasting: */ __pyx_v_src = __pyx_v_tmp; /* "View.MemoryView":1259 * _err_dim(ValueError, "Dimension %d is not direct", i) * * if slices_overlap(&src, &dst, ndim, itemsize): # <<<<<<<<<<<<<< * * if not slice_is_contig(&src, order, ndim): */ } /* "View.MemoryView":1267 * src = tmp * * if not broadcasting: # <<<<<<<<<<<<<< * * */ __pyx_t_2 = ((!(__pyx_v_broadcasting != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":1270 * * * if slice_is_contig(&src, 'C', ndim): # <<<<<<<<<<<<<< * direct_copy = slice_is_contig(&dst, 'C', ndim) * elif slice_is_contig(&src, 'F', ndim): */ __pyx_t_2 = (__pyx_memviewslice_is_contig((&__pyx_v_src), 'C', __pyx_v_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1271 * * if slice_is_contig(&src, 'C', ndim): * direct_copy = slice_is_contig(&dst, 'C', ndim) # <<<<<<<<<<<<<< * elif slice_is_contig(&src, 'F', ndim): * direct_copy = slice_is_contig(&dst, 'F', ndim) */ __pyx_v_direct_copy = __pyx_memviewslice_is_contig((&__pyx_v_dst), 'C', __pyx_v_ndim); /* "View.MemoryView":1270 * * * if slice_is_contig(&src, 'C', ndim): # <<<<<<<<<<<<<< * direct_copy = slice_is_contig(&dst, 'C', ndim) * elif slice_is_contig(&src, 'F', ndim): */ goto __pyx_L12; } /* "View.MemoryView":1272 * if slice_is_contig(&src, 'C', ndim): * direct_copy = slice_is_contig(&dst, 'C', ndim) * elif slice_is_contig(&src, 'F', ndim): # <<<<<<<<<<<<<< * direct_copy = slice_is_contig(&dst, 'F', ndim) * */ __pyx_t_2 = (__pyx_memviewslice_is_contig((&__pyx_v_src), 'F', __pyx_v_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1273 * direct_copy = slice_is_contig(&dst, 'C', ndim) * elif slice_is_contig(&src, 'F', ndim): * direct_copy = slice_is_contig(&dst, 'F', ndim) # <<<<<<<<<<<<<< * * if direct_copy: */ __pyx_v_direct_copy = __pyx_memviewslice_is_contig((&__pyx_v_dst), 'F', __pyx_v_ndim); /* "View.MemoryView":1272 * if slice_is_contig(&src, 'C', ndim): * direct_copy = slice_is_contig(&dst, 'C', ndim) * elif slice_is_contig(&src, 'F', ndim): # <<<<<<<<<<<<<< * direct_copy = slice_is_contig(&dst, 'F', ndim) * */ } __pyx_L12:; /* "View.MemoryView":1275 * direct_copy = slice_is_contig(&dst, 'F', ndim) * * if direct_copy: # <<<<<<<<<<<<<< * * refcount_copying(&dst, dtype_is_object, ndim, False) */ __pyx_t_2 = (__pyx_v_direct_copy != 0); if (__pyx_t_2) { /* "View.MemoryView":1277 * if direct_copy: * * refcount_copying(&dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<< * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) * refcount_copying(&dst, dtype_is_object, ndim, True) */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0); /* "View.MemoryView":1278 * * refcount_copying(&dst, dtype_is_object, ndim, False) * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) # <<<<<<<<<<<<<< * refcount_copying(&dst, dtype_is_object, ndim, True) * free(tmpdata) */ memcpy(__pyx_v_dst.data, __pyx_v_src.data, __pyx_memoryview_slice_get_size((&__pyx_v_src), __pyx_v_ndim)); /* "View.MemoryView":1279 * refcount_copying(&dst, dtype_is_object, ndim, False) * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) * refcount_copying(&dst, dtype_is_object, ndim, 
True) # <<<<<<<<<<<<<< * free(tmpdata) * return 0 */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1); /* "View.MemoryView":1280 * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) * refcount_copying(&dst, dtype_is_object, ndim, True) * free(tmpdata) # <<<<<<<<<<<<<< * return 0 * */ free(__pyx_v_tmpdata); /* "View.MemoryView":1281 * refcount_copying(&dst, dtype_is_object, ndim, True) * free(tmpdata) * return 0 # <<<<<<<<<<<<<< * * if order == 'F' == get_best_order(&dst, ndim): */ __pyx_r = 0; goto __pyx_L0; /* "View.MemoryView":1275 * direct_copy = slice_is_contig(&dst, 'F', ndim) * * if direct_copy: # <<<<<<<<<<<<<< * * refcount_copying(&dst, dtype_is_object, ndim, False) */ } /* "View.MemoryView":1267 * src = tmp * * if not broadcasting: # <<<<<<<<<<<<<< * * */ } /* "View.MemoryView":1283 * return 0 * * if order == 'F' == get_best_order(&dst, ndim): # <<<<<<<<<<<<<< * * */ __pyx_t_2 = (__pyx_v_order == 'F'); if (__pyx_t_2) { __pyx_t_2 = ('F' == __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim)); } __pyx_t_7 = (__pyx_t_2 != 0); if (__pyx_t_7) { /* "View.MemoryView":1286 * * * transpose_memslice(&src) # <<<<<<<<<<<<<< * transpose_memslice(&dst) * */ __pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_src)); if (unlikely(__pyx_t_5 == 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 1286; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "View.MemoryView":1287 * * transpose_memslice(&src) * transpose_memslice(&dst) # <<<<<<<<<<<<<< * * refcount_copying(&dst, dtype_is_object, ndim, False) */ __pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_dst)); if (unlikely(__pyx_t_5 == 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 1287; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "View.MemoryView":1283 * return 0 * * if order == 'F' == get_best_order(&dst, ndim): # <<<<<<<<<<<<<< * * */ } /* "View.MemoryView":1289 * transpose_memslice(&dst) * * refcount_copying(&dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<< * copy_strided_to_strided(&src, &dst, ndim, itemsize) * refcount_copying(&dst, dtype_is_object, ndim, True) */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0); /* "View.MemoryView":1290 * * refcount_copying(&dst, dtype_is_object, ndim, False) * copy_strided_to_strided(&src, &dst, ndim, itemsize) # <<<<<<<<<<<<<< * refcount_copying(&dst, dtype_is_object, ndim, True) * */ copy_strided_to_strided((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize); /* "View.MemoryView":1291 * refcount_copying(&dst, dtype_is_object, ndim, False) * copy_strided_to_strided(&src, &dst, ndim, itemsize) * refcount_copying(&dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<< * * free(tmpdata) */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1); /* "View.MemoryView":1293 * refcount_copying(&dst, dtype_is_object, ndim, True) * * free(tmpdata) # <<<<<<<<<<<<<< * return 0 * */ free(__pyx_v_tmpdata); /* "View.MemoryView":1294 * * free(tmpdata) * return 0 # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_broadcast_leading') */ __pyx_r = 0; goto __pyx_L0; /* "View.MemoryView":1225 * * @cname('__pyx_memoryview_copy_contents') * cdef int memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<< * __Pyx_memviewslice dst, * int src_ndim, int dst_ndim, */ /* function exit code */ __pyx_L1_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_AddTraceback("View.MemoryView.memoryview_copy_contents", __pyx_clineno, __pyx_lineno, 
__pyx_filename); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_r = -1; __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1297 * * @cname('__pyx_memoryview_broadcast_leading') * cdef void broadcast_leading(__Pyx_memviewslice *mslice, # <<<<<<<<<<<<<< * int ndim, * int ndim_other) nogil: */ static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim, int __pyx_v_ndim_other) { int __pyx_v_i; int __pyx_v_offset; int __pyx_t_1; int __pyx_t_2; /* "View.MemoryView":1301 * int ndim_other) nogil: * cdef int i * cdef int offset = ndim_other - ndim # <<<<<<<<<<<<<< * * for i in range(ndim - 1, -1, -1): */ __pyx_v_offset = (__pyx_v_ndim_other - __pyx_v_ndim); /* "View.MemoryView":1303 * cdef int offset = ndim_other - ndim * * for i in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< * mslice.shape[i + offset] = mslice.shape[i] * mslice.strides[i + offset] = mslice.strides[i] */ for (__pyx_t_1 = (__pyx_v_ndim - 1); __pyx_t_1 > -1; __pyx_t_1-=1) { __pyx_v_i = __pyx_t_1; /* "View.MemoryView":1304 * * for i in range(ndim - 1, -1, -1): * mslice.shape[i + offset] = mslice.shape[i] # <<<<<<<<<<<<<< * mslice.strides[i + offset] = mslice.strides[i] * mslice.suboffsets[i + offset] = mslice.suboffsets[i] */ (__pyx_v_mslice->shape[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->shape[__pyx_v_i]); /* "View.MemoryView":1305 * for i in range(ndim - 1, -1, -1): * mslice.shape[i + offset] = mslice.shape[i] * mslice.strides[i + offset] = mslice.strides[i] # <<<<<<<<<<<<<< * mslice.suboffsets[i + offset] = mslice.suboffsets[i] * */ (__pyx_v_mslice->strides[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->strides[__pyx_v_i]); /* "View.MemoryView":1306 * mslice.shape[i + offset] = mslice.shape[i] * mslice.strides[i + offset] = mslice.strides[i] * mslice.suboffsets[i + offset] = mslice.suboffsets[i] # <<<<<<<<<<<<<< * * for i in range(offset): */ (__pyx_v_mslice->suboffsets[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->suboffsets[__pyx_v_i]); } /* "View.MemoryView":1308 * mslice.suboffsets[i + offset] = mslice.suboffsets[i] * * for i in range(offset): # <<<<<<<<<<<<<< * mslice.shape[i] = 1 * mslice.strides[i] = mslice.strides[0] */ __pyx_t_1 = __pyx_v_offset; for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_1; __pyx_t_2+=1) { __pyx_v_i = __pyx_t_2; /* "View.MemoryView":1309 * * for i in range(offset): * mslice.shape[i] = 1 # <<<<<<<<<<<<<< * mslice.strides[i] = mslice.strides[0] * mslice.suboffsets[i] = -1 */ (__pyx_v_mslice->shape[__pyx_v_i]) = 1; /* "View.MemoryView":1310 * for i in range(offset): * mslice.shape[i] = 1 * mslice.strides[i] = mslice.strides[0] # <<<<<<<<<<<<<< * mslice.suboffsets[i] = -1 * */ (__pyx_v_mslice->strides[__pyx_v_i]) = (__pyx_v_mslice->strides[0]); /* "View.MemoryView":1311 * mslice.shape[i] = 1 * mslice.strides[i] = mslice.strides[0] * mslice.suboffsets[i] = -1 # <<<<<<<<<<<<<< * * */ (__pyx_v_mslice->suboffsets[__pyx_v_i]) = -1; } /* "View.MemoryView":1297 * * @cname('__pyx_memoryview_broadcast_leading') * cdef void broadcast_leading(__Pyx_memviewslice *mslice, # <<<<<<<<<<<<<< * int ndim, * int ndim_other) nogil: */ /* function exit code */ } /* "View.MemoryView":1319 * * @cname('__pyx_memoryview_refcount_copying') * cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, # <<<<<<<<<<<<<< * int ndim, bint inc) nogil: * */ static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_dtype_is_object, int __pyx_v_ndim, int __pyx_v_inc) { int __pyx_t_1; /* "View.MemoryView":1323 * * * if 
dtype_is_object: # <<<<<<<<<<<<<< * refcount_objects_in_slice_with_gil(dst.data, dst.shape, * dst.strides, ndim, inc) */ __pyx_t_1 = (__pyx_v_dtype_is_object != 0); if (__pyx_t_1) { /* "View.MemoryView":1324 * * if dtype_is_object: * refcount_objects_in_slice_with_gil(dst.data, dst.shape, # <<<<<<<<<<<<<< * dst.strides, ndim, inc) * */ __pyx_memoryview_refcount_objects_in_slice_with_gil(__pyx_v_dst->data, __pyx_v_dst->shape, __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_inc); /* "View.MemoryView":1323 * * * if dtype_is_object: # <<<<<<<<<<<<<< * refcount_objects_in_slice_with_gil(dst.data, dst.shape, * dst.strides, ndim, inc) */ } /* "View.MemoryView":1319 * * @cname('__pyx_memoryview_refcount_copying') * cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, # <<<<<<<<<<<<<< * int ndim, bint inc) nogil: * */ /* function exit code */ } /* "View.MemoryView":1328 * * @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil') * cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * bint inc) with gil: */ static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) { __Pyx_RefNannyDeclarations #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_RefNannySetupContext("refcount_objects_in_slice_with_gil", 0); /* "View.MemoryView":1331 * Py_ssize_t *strides, int ndim, * bint inc) with gil: * refcount_objects_in_slice(data, shape, strides, ndim, inc) # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_refcount_objects_in_slice') */ __pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, __pyx_v_shape, __pyx_v_strides, __pyx_v_ndim, __pyx_v_inc); /* "View.MemoryView":1328 * * @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil') * cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * bint inc) with gil: */ /* function exit code */ __Pyx_RefNannyFinishContext(); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } /* "View.MemoryView":1334 * * @cname('__pyx_memoryview_refcount_objects_in_slice') * cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, bint inc): * cdef Py_ssize_t i */ static void __pyx_memoryview_refcount_objects_in_slice(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) { CYTHON_UNUSED Py_ssize_t __pyx_v_i; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; Py_ssize_t __pyx_t_2; int __pyx_t_3; __Pyx_RefNannySetupContext("refcount_objects_in_slice", 0); /* "View.MemoryView":1338 * cdef Py_ssize_t i * * for i in range(shape[0]): # <<<<<<<<<<<<<< * if ndim == 1: * if inc: */ __pyx_t_1 = (__pyx_v_shape[0]); for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_1; __pyx_t_2+=1) { __pyx_v_i = __pyx_t_2; /* "View.MemoryView":1339 * * for i in range(shape[0]): * if ndim == 1: # <<<<<<<<<<<<<< * if inc: * Py_INCREF((<PyObject **> data)[0]) */ __pyx_t_3 = ((__pyx_v_ndim == 1) != 0); if (__pyx_t_3) { /* "View.MemoryView":1340 * for i in range(shape[0]): * if ndim == 1: * if inc: # <<<<<<<<<<<<<< * Py_INCREF((<PyObject **> data)[0]) * else: */ __pyx_t_3 = (__pyx_v_inc != 0); if (__pyx_t_3) { /* "View.MemoryView":1341 * if ndim == 1: * if inc: * Py_INCREF((<PyObject **> data)[0]) # <<<<<<<<<<<<<< * else: * Py_DECREF((<PyObject **> data)[0]) */ 
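/* Once ndim reaches 1 the loop above adjusts the refcount of each PyObject*
 * element in place; outer dimensions recurse with shape + 1 / strides + 1
 * (see the else branch below). Illustrative example (hypothetical extents):
 * a 2 x 3 object slice makes two recursive calls that each touch three
 * refcounts, six in total. */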
Py_INCREF((((PyObject **)__pyx_v_data)[0])); /* "View.MemoryView":1340 * for i in range(shape[0]): * if ndim == 1: * if inc: # <<<<<<<<<<<<<< * Py_INCREF((<PyObject **> data)[0]) * else: */ goto __pyx_L6; } /* "View.MemoryView":1343 * Py_INCREF((<PyObject **> data)[0]) * else: * Py_DECREF((<PyObject **> data)[0]) # <<<<<<<<<<<<<< * else: * refcount_objects_in_slice(data, shape + 1, strides + 1, */ /*else*/ { Py_DECREF((((PyObject **)__pyx_v_data)[0])); } __pyx_L6:; /* "View.MemoryView":1339 * * for i in range(shape[0]): * if ndim == 1: # <<<<<<<<<<<<<< * if inc: * Py_INCREF((<PyObject **> data)[0]) */ goto __pyx_L5; } /* "View.MemoryView":1345 * Py_DECREF((<PyObject **> data)[0]) * else: * refcount_objects_in_slice(data, shape + 1, strides + 1, # <<<<<<<<<<<<<< * ndim - 1, inc) * */ /*else*/ { /* "View.MemoryView":1346 * else: * refcount_objects_in_slice(data, shape + 1, strides + 1, * ndim - 1, inc) # <<<<<<<<<<<<<< * * data += strides[0] */ __pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_inc); } __pyx_L5:; /* "View.MemoryView":1348 * ndim - 1, inc) * * data += strides[0] # <<<<<<<<<<<<<< * * */ __pyx_v_data = (__pyx_v_data + (__pyx_v_strides[0])); } /* "View.MemoryView":1334 * * @cname('__pyx_memoryview_refcount_objects_in_slice') * cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, bint inc): * cdef Py_ssize_t i */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":1354 * * @cname('__pyx_memoryview_slice_assign_scalar') * cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim, # <<<<<<<<<<<<<< * size_t itemsize, void *item, * bint dtype_is_object) nogil: */ static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item, int __pyx_v_dtype_is_object) { /* "View.MemoryView":1357 * size_t itemsize, void *item, * bint dtype_is_object) nogil: * refcount_copying(dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<< * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, * itemsize, item) */ __pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 0); /* "View.MemoryView":1358 * bint dtype_is_object) nogil: * refcount_copying(dst, dtype_is_object, ndim, False) * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, # <<<<<<<<<<<<<< * itemsize, item) * refcount_copying(dst, dtype_is_object, ndim, True) */ __pyx_memoryview__slice_assign_scalar(__pyx_v_dst->data, __pyx_v_dst->shape, __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_itemsize, __pyx_v_item); /* "View.MemoryView":1360 * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, * itemsize, item) * refcount_copying(dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<< * * */ __pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 1); /* "View.MemoryView":1354 * * @cname('__pyx_memoryview_slice_assign_scalar') * cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim, # <<<<<<<<<<<<<< * size_t itemsize, void *item, * bint dtype_is_object) nogil: */ /* function exit code */ } /* "View.MemoryView":1364 * * @cname('__pyx_memoryview__slice_assign_scalar') * cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * size_t itemsize, void *item) nogil: */ static void __pyx_memoryview__slice_assign_scalar(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, 
Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item) {
  CYTHON_UNUSED Py_ssize_t __pyx_v_i;
  Py_ssize_t __pyx_v_stride;
  Py_ssize_t __pyx_v_extent;
  int __pyx_t_1;
  Py_ssize_t __pyx_t_2;
  Py_ssize_t __pyx_t_3;

  /* "View.MemoryView":1368
 * size_t itemsize, void *item) nogil:
 * cdef Py_ssize_t i
 * cdef Py_ssize_t stride = strides[0] # <<<<<<<<<<<<<<
 * cdef Py_ssize_t extent = shape[0]
 *
 */
  __pyx_v_stride = (__pyx_v_strides[0]);

  /* "View.MemoryView":1369
 * cdef Py_ssize_t i
 * cdef Py_ssize_t stride = strides[0]
 * cdef Py_ssize_t extent = shape[0] # <<<<<<<<<<<<<<
 *
 * if ndim == 1:
 */
  __pyx_v_extent = (__pyx_v_shape[0]);

  /* "View.MemoryView":1371
 * cdef Py_ssize_t extent = shape[0]
 *
 * if ndim == 1: # <<<<<<<<<<<<<<
 * for i in range(extent):
 * memcpy(data, item, itemsize)
 */
  __pyx_t_1 = ((__pyx_v_ndim == 1) != 0);
  if (__pyx_t_1) {

    /* "View.MemoryView":1372
 *
 * if ndim == 1:
 * for i in range(extent): # <<<<<<<<<<<<<<
 * memcpy(data, item, itemsize)
 * data += stride
 */
    __pyx_t_2 = __pyx_v_extent;
    for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) {
      __pyx_v_i = __pyx_t_3;

      /* "View.MemoryView":1373
 * if ndim == 1:
 * for i in range(extent):
 * memcpy(data, item, itemsize) # <<<<<<<<<<<<<<
 * data += stride
 * else:
 */
      memcpy(__pyx_v_data, __pyx_v_item, __pyx_v_itemsize);

      /* "View.MemoryView":1374
 * for i in range(extent):
 * memcpy(data, item, itemsize)
 * data += stride # <<<<<<<<<<<<<<
 * else:
 * for i in range(extent):
 */
      __pyx_v_data = (__pyx_v_data + __pyx_v_stride);
    }

    /* "View.MemoryView":1371
 * cdef Py_ssize_t extent = shape[0]
 *
 * if ndim == 1: # <<<<<<<<<<<<<<
 * for i in range(extent):
 * memcpy(data, item, itemsize)
 */
    goto __pyx_L3;
  }

  /* "View.MemoryView":1376
 * data += stride
 * else:
 * for i in range(extent): # <<<<<<<<<<<<<<
 * _slice_assign_scalar(data, shape + 1, strides + 1,
 * ndim - 1, itemsize, item)
 */
  /*else*/ {
    __pyx_t_2 = __pyx_v_extent;
    for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) {
      __pyx_v_i = __pyx_t_3;

      /* "View.MemoryView":1377
 * else:
 * for i in range(extent):
 * _slice_assign_scalar(data, shape + 1, strides + 1, # <<<<<<<<<<<<<<
 * ndim - 1, itemsize, item)
 * data += stride
 */
      __pyx_memoryview__slice_assign_scalar(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize, __pyx_v_item);

      /* "View.MemoryView":1379
 * _slice_assign_scalar(data, shape + 1, strides + 1,
 * ndim - 1, itemsize, item)
 * data += stride # <<<<<<<<<<<<<<
 *
 *
 */
      __pyx_v_data = (__pyx_v_data + __pyx_v_stride);
    }
  }
  __pyx_L3:;

  /* "View.MemoryView":1364
 *
 * @cname('__pyx_memoryview__slice_assign_scalar')
 * cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<<
 * Py_ssize_t *strides, int ndim,
 * size_t itemsize, void *item) nogil:
 */

  /* function exit code */
}

static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k) {
  struct __pyx_array_obj *p;
  PyObject *o;
  if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) {
    o = (*t->tp_alloc)(t, 0);
  } else {
    o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0);
  }
  if (unlikely(!o)) return 0;
  p = ((struct __pyx_array_obj *)o);
  p->mode = ((PyObject*)Py_None); Py_INCREF(Py_None);
  p->_format = ((PyObject*)Py_None); Py_INCREF(Py_None);
  if (unlikely(__pyx_array___cinit__(o, a, k) < 0)) {
    Py_DECREF(o); o = 0;
  }
  return o;
}

static void __pyx_tp_dealloc_array(PyObject *o) {
  struct __pyx_array_obj *p = (struct __pyx_array_obj *)o;
  #if PY_VERSION_HEX >= 0x030400a1
  if (unlikely(Py_TYPE(o)->tp_finalize) && (!PyType_IS_GC(Py_TYPE(o)) || !_PyGC_FINALIZED(o))) {
    if (PyObject_CallFinalizerFromDealloc(o)) return;
  }
  #endif
  {
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    ++Py_REFCNT(o);
    __pyx_array___dealloc__(o);
    --Py_REFCNT(o);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->mode);
  Py_CLEAR(p->_format);
  (*Py_TYPE(o)->tp_free)(o);
}

static PyObject *__pyx_sq_item_array(PyObject *o, Py_ssize_t i) {
  PyObject *r;
  PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0;
  r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x);
  Py_DECREF(x);
  return r;
}

static int __pyx_mp_ass_subscript_array(PyObject *o, PyObject *i, PyObject *v) {
  if (v) {
    return __pyx_array___setitem__(o, i, v);
  }
  else {
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name);
    return -1;
  }
}

static PyObject *__pyx_tp_getattro_array(PyObject *o, PyObject *n) {
  PyObject *v = PyObject_GenericGetAttr(o, n);
  if (!v && PyErr_ExceptionMatches(PyExc_AttributeError)) {
    PyErr_Clear();
    v = __pyx_array___getattr__(o, n);
  }
  return v;
}

static PyObject *__pyx_getprop___pyx_array_memview(PyObject *o, CYTHON_UNUSED void *x) {
  return get_memview(o);
}

static PyMethodDef __pyx_methods_array[] = {
  {"__getattr__", (PyCFunction)__pyx_array___getattr__, METH_O|METH_COEXIST, 0},
  {0, 0, 0, 0}
};

static struct PyGetSetDef __pyx_getsets_array[] = {
  {(char *)"memview", __pyx_getprop___pyx_array_memview, 0, 0, 0},
  {0, 0, 0, 0, 0}
};

static PySequenceMethods __pyx_tp_as_sequence_array = {
  0, /*sq_length*/
  0, /*sq_concat*/
  0, /*sq_repeat*/
  __pyx_sq_item_array, /*sq_item*/
  0, /*sq_slice*/
  0, /*sq_ass_item*/
  0, /*sq_ass_slice*/
  0, /*sq_contains*/
  0, /*sq_inplace_concat*/
  0, /*sq_inplace_repeat*/
};

static PyMappingMethods __pyx_tp_as_mapping_array = {
  0, /*mp_length*/
  __pyx_array___getitem__, /*mp_subscript*/
  __pyx_mp_ass_subscript_array, /*mp_ass_subscript*/
};

static PyBufferProcs __pyx_tp_as_buffer_array = {
  #if PY_MAJOR_VERSION < 3
  0, /*bf_getreadbuffer*/
  #endif
  #if PY_MAJOR_VERSION < 3
  0, /*bf_getwritebuffer*/
  #endif
  #if PY_MAJOR_VERSION < 3
  0, /*bf_getsegcount*/
  #endif
  #if PY_MAJOR_VERSION < 3
  0, /*bf_getcharbuffer*/
  #endif
  __pyx_array_getbuffer, /*bf_getbuffer*/
  0, /*bf_releasebuffer*/
};

static PyTypeObject __pyx_type___pyx_array = {
  PyVarObject_HEAD_INIT(0, 0)
  "tribeflow._eval.array", /*tp_name*/
  sizeof(struct __pyx_array_obj), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_array, /*tp_dealloc*/
  0, /*tp_print*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  #if PY_MAJOR_VERSION < 3
  0, /*tp_compare*/
  #endif
  #if PY_MAJOR_VERSION >= 3
  0, /*tp_as_async*/
  #endif
  0, /*tp_repr*/
  0, /*tp_as_number*/
  &__pyx_tp_as_sequence_array, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_array, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  __pyx_tp_getattro_array, /*tp_getattro*/
  0, /*tp_setattro*/
  &__pyx_tp_as_buffer_array, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE, /*tp_flags*/
  0, /*tp_doc*/
  0, /*tp_traverse*/
  0, /*tp_clear*/
  0, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_array, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_array, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  0, /*tp_dictoffset*/
  0, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_array, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if PY_VERSION_HEX >= 0x030400a1
  0, /*tp_finalize*/
  #endif
};
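/* The slot table above wires tribeflow._eval.array into the C-level
 * protocols: integer indexing (sq_item) boxes the index and defers to
 * mp_subscript, subscript deletion is rejected with NotImplementedError,
 * and bf_getbuffer exposes the underlying data. Illustrative Python-side
 * use (hypothetical instance):
 *
 *     mv = memoryview(arr)   # goes through __pyx_array_getbuffer
 *     mv2 = arr.memview      # property routed via __pyx_getsets_array
 */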
static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) { struct __pyx_MemviewEnum_obj *p; PyObject *o; if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { o = (*t->tp_alloc)(t, 0); } else { o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); } if (unlikely(!o)) return 0; p = ((struct __pyx_MemviewEnum_obj *)o); p->name = Py_None; Py_INCREF(Py_None); return o; } static void __pyx_tp_dealloc_Enum(PyObject *o) { struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; #if PY_VERSION_HEX >= 0x030400a1 if (unlikely(Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif PyObject_GC_UnTrack(o); Py_CLEAR(p->name); (*Py_TYPE(o)->tp_free)(o); } static int __pyx_tp_traverse_Enum(PyObject *o, visitproc v, void *a) { int e; struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; if (p->name) { e = (*v)(p->name, a); if (e) return e; } return 0; } static int __pyx_tp_clear_Enum(PyObject *o) { PyObject* tmp; struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; tmp = ((PyObject*)p->name); p->name = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); return 0; } static PyMethodDef __pyx_methods_Enum[] = { {0, 0, 0, 0} }; static PyTypeObject __pyx_type___pyx_MemviewEnum = { PyVarObject_HEAD_INIT(0, 0) "tribeflow._eval.Enum", /*tp_name*/ sizeof(struct __pyx_MemviewEnum_obj), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc_Enum, /*tp_dealloc*/ 0, /*tp_print*/ 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif __pyx_MemviewEnum___repr__, /*tp_repr*/ 0, /*tp_as_number*/ 0, /*tp_as_sequence*/ 0, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ 0, /*tp_str*/ 0, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ 0, /*tp_doc*/ __pyx_tp_traverse_Enum, /*tp_traverse*/ __pyx_tp_clear_Enum, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods_Enum, /*tp_methods*/ 0, /*tp_members*/ 0, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ __pyx_MemviewEnum___init__, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new_Enum, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif }; static struct __pyx_vtabstruct_memoryview __pyx_vtable_memoryview; static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k) { struct __pyx_memoryview_obj *p; PyObject *o; if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { o = (*t->tp_alloc)(t, 0); } else { o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); } if (unlikely(!o)) return 0; p = ((struct __pyx_memoryview_obj *)o); p->__pyx_vtab = __pyx_vtabptr_memoryview; p->obj = Py_None; Py_INCREF(Py_None); p->_size = Py_None; Py_INCREF(Py_None); p->_array_interface = Py_None; Py_INCREF(Py_None); p->view.obj = NULL; if (unlikely(__pyx_memoryview___cinit__(o, a, k) < 0)) { Py_DECREF(o); o = 0; } return o; } static void __pyx_tp_dealloc_memoryview(PyObject *o) { struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; #if PY_VERSION_HEX >= 0x030400a1 if 
(unlikely(Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif PyObject_GC_UnTrack(o); { PyObject *etype, *eval, *etb; PyErr_Fetch(&etype, &eval, &etb); ++Py_REFCNT(o); __pyx_memoryview___dealloc__(o); --Py_REFCNT(o); PyErr_Restore(etype, eval, etb); } Py_CLEAR(p->obj); Py_CLEAR(p->_size); Py_CLEAR(p->_array_interface); (*Py_TYPE(o)->tp_free)(o); } static int __pyx_tp_traverse_memoryview(PyObject *o, visitproc v, void *a) { int e; struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; if (p->obj) { e = (*v)(p->obj, a); if (e) return e; } if (p->_size) { e = (*v)(p->_size, a); if (e) return e; } if (p->_array_interface) { e = (*v)(p->_array_interface, a); if (e) return e; } if (p->view.obj) { e = (*v)(p->view.obj, a); if (e) return e; } return 0; } static int __pyx_tp_clear_memoryview(PyObject *o) { PyObject* tmp; struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; tmp = ((PyObject*)p->obj); p->obj = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); tmp = ((PyObject*)p->_size); p->_size = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); tmp = ((PyObject*)p->_array_interface); p->_array_interface = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); Py_CLEAR(p->view.obj); return 0; } static PyObject *__pyx_sq_item_memoryview(PyObject *o, Py_ssize_t i) { PyObject *r; PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0; r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x); Py_DECREF(x); return r; } static int __pyx_mp_ass_subscript_memoryview(PyObject *o, PyObject *i, PyObject *v) { if (v) { return __pyx_memoryview___setitem__(o, i, v); } else { PyErr_Format(PyExc_NotImplementedError, "Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name); return -1; } } static PyObject *__pyx_getprop___pyx_memoryview_T(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_memoryview_transpose(o); } static PyObject *__pyx_getprop___pyx_memoryview_base(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_memoryview__get__base(o); } static PyObject *__pyx_getprop___pyx_memoryview_shape(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_memoryview_get_shape(o); } static PyObject *__pyx_getprop___pyx_memoryview_strides(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_memoryview_get_strides(o); } static PyObject *__pyx_getprop___pyx_memoryview_suboffsets(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_memoryview_get_suboffsets(o); } static PyObject *__pyx_getprop___pyx_memoryview_ndim(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_memoryview_get_ndim(o); } static PyObject *__pyx_getprop___pyx_memoryview_itemsize(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_memoryview_get_itemsize(o); } static PyObject *__pyx_getprop___pyx_memoryview_nbytes(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_memoryview_get_nbytes(o); } static PyObject *__pyx_getprop___pyx_memoryview_size(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_memoryview_get_size(o); } static PyMethodDef __pyx_methods_memoryview[] = { {"is_c_contig", (PyCFunction)__pyx_memoryview_is_c_contig, METH_NOARGS, 0}, {"is_f_contig", (PyCFunction)__pyx_memoryview_is_f_contig, METH_NOARGS, 0}, {"copy", (PyCFunction)__pyx_memoryview_copy, METH_NOARGS, 0}, {"copy_fortran", (PyCFunction)__pyx_memoryview_copy_fortran, METH_NOARGS, 0}, {0, 0, 0, 0} }; static struct PyGetSetDef __pyx_getsets_memoryview[] = { {(char *)"T", __pyx_getprop___pyx_memoryview_T, 0, 0, 0}, {(char *)"base", __pyx_getprop___pyx_memoryview_base, 0, 0, 0}, {(char *)"shape", __pyx_getprop___pyx_memoryview_shape, 
0, 0, 0}, {(char *)"strides", __pyx_getprop___pyx_memoryview_strides, 0, 0, 0}, {(char *)"suboffsets", __pyx_getprop___pyx_memoryview_suboffsets, 0, 0, 0}, {(char *)"ndim", __pyx_getprop___pyx_memoryview_ndim, 0, 0, 0}, {(char *)"itemsize", __pyx_getprop___pyx_memoryview_itemsize, 0, 0, 0}, {(char *)"nbytes", __pyx_getprop___pyx_memoryview_nbytes, 0, 0, 0}, {(char *)"size", __pyx_getprop___pyx_memoryview_size, 0, 0, 0}, {0, 0, 0, 0, 0} }; static PySequenceMethods __pyx_tp_as_sequence_memoryview = { __pyx_memoryview___len__, /*sq_length*/ 0, /*sq_concat*/ 0, /*sq_repeat*/ __pyx_sq_item_memoryview, /*sq_item*/ 0, /*sq_slice*/ 0, /*sq_ass_item*/ 0, /*sq_ass_slice*/ 0, /*sq_contains*/ 0, /*sq_inplace_concat*/ 0, /*sq_inplace_repeat*/ }; static PyMappingMethods __pyx_tp_as_mapping_memoryview = { __pyx_memoryview___len__, /*mp_length*/ __pyx_memoryview___getitem__, /*mp_subscript*/ __pyx_mp_ass_subscript_memoryview, /*mp_ass_subscript*/ }; static PyBufferProcs __pyx_tp_as_buffer_memoryview = { #if PY_MAJOR_VERSION < 3 0, /*bf_getreadbuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getwritebuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getsegcount*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getcharbuffer*/ #endif __pyx_memoryview_getbuffer, /*bf_getbuffer*/ 0, /*bf_releasebuffer*/ }; static PyTypeObject __pyx_type___pyx_memoryview = { PyVarObject_HEAD_INIT(0, 0) "tribeflow._eval.memoryview", /*tp_name*/ sizeof(struct __pyx_memoryview_obj), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc_memoryview, /*tp_dealloc*/ 0, /*tp_print*/ 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif __pyx_memoryview___repr__, /*tp_repr*/ 0, /*tp_as_number*/ &__pyx_tp_as_sequence_memoryview, /*tp_as_sequence*/ &__pyx_tp_as_mapping_memoryview, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ __pyx_memoryview___str__, /*tp_str*/ 0, /*tp_getattro*/ 0, /*tp_setattro*/ &__pyx_tp_as_buffer_memoryview, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ 0, /*tp_doc*/ __pyx_tp_traverse_memoryview, /*tp_traverse*/ __pyx_tp_clear_memoryview, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods_memoryview, /*tp_methods*/ 0, /*tp_members*/ __pyx_getsets_memoryview, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new_memoryview, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif }; static struct __pyx_vtabstruct__memoryviewslice __pyx_vtable__memoryviewslice; static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k) { struct __pyx_memoryviewslice_obj *p; PyObject *o = __pyx_tp_new_memoryview(t, a, k); if (unlikely(!o)) return 0; p = ((struct __pyx_memoryviewslice_obj *)o); p->__pyx_base.__pyx_vtab = (struct __pyx_vtabstruct_memoryview*)__pyx_vtabptr__memoryviewslice; p->from_object = Py_None; Py_INCREF(Py_None); p->from_slice.memview = NULL; return o; } static void __pyx_tp_dealloc__memoryviewslice(PyObject *o) { struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; #if PY_VERSION_HEX >= 0x030400a1 if (unlikely(Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) 
{ if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif PyObject_GC_UnTrack(o); { PyObject *etype, *eval, *etb; PyErr_Fetch(&etype, &eval, &etb); ++Py_REFCNT(o); __pyx_memoryviewslice___dealloc__(o); --Py_REFCNT(o); PyErr_Restore(etype, eval, etb); } Py_CLEAR(p->from_object); PyObject_GC_Track(o); __pyx_tp_dealloc_memoryview(o); } static int __pyx_tp_traverse__memoryviewslice(PyObject *o, visitproc v, void *a) { int e; struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; e = __pyx_tp_traverse_memoryview(o, v, a); if (e) return e; if (p->from_object) { e = (*v)(p->from_object, a); if (e) return e; } return 0; } static int __pyx_tp_clear__memoryviewslice(PyObject *o) { PyObject* tmp; struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; __pyx_tp_clear_memoryview(o); tmp = ((PyObject*)p->from_object); p->from_object = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); __PYX_XDEC_MEMVIEW(&p->from_slice, 1); return 0; } static PyObject *__pyx_getprop___pyx_memoryviewslice_base(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_memoryviewslice__get__base(o); } static PyMethodDef __pyx_methods__memoryviewslice[] = { {0, 0, 0, 0} }; static struct PyGetSetDef __pyx_getsets__memoryviewslice[] = { {(char *)"base", __pyx_getprop___pyx_memoryviewslice_base, 0, 0, 0}, {0, 0, 0, 0, 0} }; static PyTypeObject __pyx_type___pyx_memoryviewslice = { PyVarObject_HEAD_INIT(0, 0) "tribeflow._eval._memoryviewslice", /*tp_name*/ sizeof(struct __pyx_memoryviewslice_obj), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc__memoryviewslice, /*tp_dealloc*/ 0, /*tp_print*/ 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif #if CYTHON_COMPILING_IN_PYPY __pyx_memoryview___repr__, /*tp_repr*/ #else 0, /*tp_repr*/ #endif 0, /*tp_as_number*/ 0, /*tp_as_sequence*/ 0, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ #if CYTHON_COMPILING_IN_PYPY __pyx_memoryview___str__, /*tp_str*/ #else 0, /*tp_str*/ #endif 0, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ "Internal class for passing memoryview slices to Python", /*tp_doc*/ __pyx_tp_traverse__memoryviewslice, /*tp_traverse*/ __pyx_tp_clear__memoryviewslice, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods__memoryviewslice, /*tp_methods*/ 0, /*tp_members*/ __pyx_getsets__memoryviewslice, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new__memoryviewslice, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif }; static PyMethodDef __pyx_methods[] = { {0, 0, 0, 0} }; #if PY_MAJOR_VERSION >= 3 static struct PyModuleDef __pyx_moduledef = { #if PY_VERSION_HEX < 0x03020000 { PyObject_HEAD_INIT(NULL) NULL, 0, NULL }, #else PyModuleDef_HEAD_INIT, #endif "_eval", 0, /* m_doc */ -1, /* m_size */ __pyx_methods /* m_methods */, NULL, /* m_reload */ NULL, /* m_traverse */ NULL, /* m_clear */ NULL /* m_free */ }; #endif static __Pyx_StringTabEntry __pyx_string_tab[] = { {&__pyx_n_s_ASCII, __pyx_k_ASCII, sizeof(__pyx_k_ASCII), 0, 0, 1, 1}, {&__pyx_kp_s_Buffer_view_does_not_expose_stri, 
__pyx_k_Buffer_view_does_not_expose_stri, sizeof(__pyx_k_Buffer_view_does_not_expose_stri), 0, 0, 1, 0}, {&__pyx_kp_s_Can_only_create_a_buffer_that_is, __pyx_k_Can_only_create_a_buffer_that_is, sizeof(__pyx_k_Can_only_create_a_buffer_that_is), 0, 0, 1, 0}, {&__pyx_kp_s_Cannot_index_with_type_s, __pyx_k_Cannot_index_with_type_s, sizeof(__pyx_k_Cannot_index_with_type_s), 0, 0, 1, 0}, {&__pyx_n_s_Count_sz, __pyx_k_Count_sz, sizeof(__pyx_k_Count_sz), 0, 0, 1, 1}, {&__pyx_n_s_Count_zh, __pyx_k_Count_zh, sizeof(__pyx_k_Count_zh), 0, 0, 1, 1}, {&__pyx_n_s_Dts, __pyx_k_Dts, sizeof(__pyx_k_Dts), 0, 0, 1, 1}, {&__pyx_n_s_Ellipsis, __pyx_k_Ellipsis, sizeof(__pyx_k_Ellipsis), 0, 0, 1, 1}, {&__pyx_kp_s_Empty_shape_tuple_for_cython_arr, __pyx_k_Empty_shape_tuple_for_cython_arr, sizeof(__pyx_k_Empty_shape_tuple_for_cython_arr), 0, 0, 1, 0}, {&__pyx_n_s_HOs, __pyx_k_HOs, sizeof(__pyx_k_HOs), 0, 0, 1, 1}, {&__pyx_n_s_IndexError, __pyx_k_IndexError, sizeof(__pyx_k_IndexError), 0, 0, 1, 1}, {&__pyx_kp_s_Indirect_dimensions_not_supporte, __pyx_k_Indirect_dimensions_not_supporte, sizeof(__pyx_k_Indirect_dimensions_not_supporte), 0, 0, 1, 0}, {&__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_k_Invalid_mode_expected_c_or_fortr, sizeof(__pyx_k_Invalid_mode_expected_c_or_fortr), 0, 0, 1, 0}, {&__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_k_Invalid_shape_in_axis_d_d, sizeof(__pyx_k_Invalid_shape_in_axis_d_d), 0, 0, 1, 0}, {&__pyx_n_s_MemoryError, __pyx_k_MemoryError, sizeof(__pyx_k_MemoryError), 0, 0, 1, 1}, {&__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_k_MemoryView_of_r_at_0x_x, sizeof(__pyx_k_MemoryView_of_r_at_0x_x), 0, 0, 1, 0}, {&__pyx_kp_s_MemoryView_of_r_object, __pyx_k_MemoryView_of_r_object, sizeof(__pyx_k_MemoryView_of_r_object), 0, 0, 1, 0}, {&__pyx_n_b_O, __pyx_k_O, sizeof(__pyx_k_O), 0, 0, 0, 1}, {&__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_k_Out_of_bounds_on_buffer_access_a, sizeof(__pyx_k_Out_of_bounds_on_buffer_access_a), 0, 0, 1, 0}, {&__pyx_n_s_Psi_sz, __pyx_k_Psi_sz, sizeof(__pyx_k_Psi_sz), 0, 0, 1, 1}, {&__pyx_n_s_Theta_zh, __pyx_k_Theta_zh, sizeof(__pyx_k_Theta_zh), 0, 0, 1, 1}, {&__pyx_n_s_Trace, __pyx_k_Trace, sizeof(__pyx_k_Trace), 0, 0, 1, 1}, {&__pyx_n_s_TypeError, __pyx_k_TypeError, sizeof(__pyx_k_TypeError), 0, 0, 1, 1}, {&__pyx_kp_s_Unable_to_convert_item_to_object, __pyx_k_Unable_to_convert_item_to_object, sizeof(__pyx_k_Unable_to_convert_item_to_object), 0, 0, 1, 0}, {&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1}, {&__pyx_n_s_allocate_buffer, __pyx_k_allocate_buffer, sizeof(__pyx_k_allocate_buffer), 0, 0, 1, 1}, {&__pyx_n_s_alpha_zh, __pyx_k_alpha_zh, sizeof(__pyx_k_alpha_zh), 0, 0, 1, 1}, {&__pyx_n_s_array, __pyx_k_array, sizeof(__pyx_k_array), 0, 0, 1, 1}, {&__pyx_n_s_base, __pyx_k_base, sizeof(__pyx_k_base), 0, 0, 1, 1}, {&__pyx_n_s_beta_zs, __pyx_k_beta_zs, sizeof(__pyx_k_beta_zs), 0, 0, 1, 1}, {&__pyx_n_s_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 0, 1, 1}, {&__pyx_n_u_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 1, 0, 1}, {&__pyx_n_s_candidate_o, __pyx_k_candidate_o, sizeof(__pyx_k_candidate_o), 0, 0, 1, 1}, {&__pyx_n_s_class, __pyx_k_class, sizeof(__pyx_k_class), 0, 0, 1, 1}, {&__pyx_kp_s_contiguous_and_direct, __pyx_k_contiguous_and_direct, sizeof(__pyx_k_contiguous_and_direct), 0, 0, 1, 0}, {&__pyx_kp_s_contiguous_and_indirect, __pyx_k_contiguous_and_indirect, sizeof(__pyx_k_contiguous_and_indirect), 0, 0, 1, 0}, {&__pyx_n_s_count_h, __pyx_k_count_h, sizeof(__pyx_k_count_h), 0, 0, 1, 1}, {&__pyx_n_s_count_z, __pyx_k_count_z, 
sizeof(__pyx_k_count_z), 0, 0, 1, 1}, {&__pyx_n_s_d, __pyx_k_d, sizeof(__pyx_k_d), 0, 0, 1, 1}, {&__pyx_n_s_dt, __pyx_k_dt, sizeof(__pyx_k_dt), 0, 0, 1, 1}, {&__pyx_n_s_dtype, __pyx_k_dtype, sizeof(__pyx_k_dtype), 0, 0, 1, 1}, {&__pyx_n_s_dtype_is_object, __pyx_k_dtype_is_object, sizeof(__pyx_k_dtype_is_object), 0, 0, 1, 1}, {&__pyx_n_s_encode, __pyx_k_encode, sizeof(__pyx_k_encode), 0, 0, 1, 1}, {&__pyx_n_s_enumerate, __pyx_k_enumerate, sizeof(__pyx_k_enumerate), 0, 0, 1, 1}, {&__pyx_n_s_error, __pyx_k_error, sizeof(__pyx_k_error), 0, 0, 1, 1}, {&__pyx_n_s_flags, __pyx_k_flags, sizeof(__pyx_k_flags), 0, 0, 1, 1}, {&__pyx_n_s_format, __pyx_k_format, sizeof(__pyx_k_format), 0, 0, 1, 1}, {&__pyx_n_s_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 0, 1, 1}, {&__pyx_n_u_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 1, 0, 1}, {&__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_k_got_differing_extents_in_dimensi, sizeof(__pyx_k_got_differing_extents_in_dimensi), 0, 0, 1, 0}, {&__pyx_n_s_h, __pyx_k_h, sizeof(__pyx_k_h), 0, 0, 1, 1}, {&__pyx_kp_s_home_mohame11_outlierDetection, __pyx_k_home_mohame11_outlierDetection, sizeof(__pyx_k_home_mohame11_outlierDetection), 0, 0, 1, 0}, {&__pyx_n_s_i, __pyx_k_i, sizeof(__pyx_k_i), 0, 0, 1, 1}, {&__pyx_n_s_i4, __pyx_k_i4, sizeof(__pyx_k_i4), 0, 0, 1, 1}, {&__pyx_n_s_id, __pyx_k_id, sizeof(__pyx_k_id), 0, 0, 1, 1}, {&__pyx_n_s_idx, __pyx_k_idx, sizeof(__pyx_k_idx), 0, 0, 1, 1}, {&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1}, {&__pyx_n_s_itemsize, __pyx_k_itemsize, sizeof(__pyx_k_itemsize), 0, 0, 1, 1}, {&__pyx_kp_s_itemsize_0_for_cython_array, __pyx_k_itemsize_0_for_cython_array, sizeof(__pyx_k_itemsize_0_for_cython_array), 0, 0, 1, 0}, {&__pyx_n_s_j, __pyx_k_j, sizeof(__pyx_k_j), 0, 0, 1, 1}, {&__pyx_n_s_kernel, __pyx_k_kernel, sizeof(__pyx_k_kernel), 0, 0, 1, 1}, {&__pyx_n_s_last_o, __pyx_k_last_o, sizeof(__pyx_k_last_o), 0, 0, 1, 1}, {&__pyx_n_s_ll_per_z, __pyx_k_ll_per_z, sizeof(__pyx_k_ll_per_z), 0, 0, 1, 1}, {&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1}, {&__pyx_n_s_mem, __pyx_k_mem, sizeof(__pyx_k_mem), 0, 0, 1, 1}, {&__pyx_n_s_mem_factor, __pyx_k_mem_factor, sizeof(__pyx_k_mem_factor), 0, 0, 1, 1}, {&__pyx_n_s_memview, __pyx_k_memview, sizeof(__pyx_k_memview), 0, 0, 1, 1}, {&__pyx_n_s_mode, __pyx_k_mode, sizeof(__pyx_k_mode), 0, 0, 1, 1}, {&__pyx_n_s_name, __pyx_k_name, sizeof(__pyx_k_name), 0, 0, 1, 1}, {&__pyx_n_s_name_2, __pyx_k_name_2, sizeof(__pyx_k_name_2), 0, 0, 1, 1}, {&__pyx_n_s_ndim, __pyx_k_ndim, sizeof(__pyx_k_ndim), 0, 0, 1, 1}, {&__pyx_n_s_np, __pyx_k_np, sizeof(__pyx_k_np), 0, 0, 1, 1}, {&__pyx_n_s_ns, __pyx_k_ns, sizeof(__pyx_k_ns), 0, 0, 1, 1}, {&__pyx_n_s_numpy, __pyx_k_numpy, sizeof(__pyx_k_numpy), 0, 0, 1, 1}, {&__pyx_n_s_nz, __pyx_k_nz, sizeof(__pyx_k_nz), 0, 0, 1, 1}, {&__pyx_n_s_o, __pyx_k_o, sizeof(__pyx_k_o), 0, 0, 1, 1}, {&__pyx_n_s_obj, __pyx_k_obj, sizeof(__pyx_k_obj), 0, 0, 1, 1}, {&__pyx_n_s_p, __pyx_k_p, sizeof(__pyx_k_p), 0, 0, 1, 1}, {&__pyx_n_s_pack, __pyx_k_pack, sizeof(__pyx_k_pack), 0, 0, 1, 1}, {&__pyx_n_s_predictions, __pyx_k_predictions, sizeof(__pyx_k_predictions), 0, 0, 1, 1}, {&__pyx_n_s_previous_stamps, __pyx_k_previous_stamps, sizeof(__pyx_k_previous_stamps), 0, 0, 1, 1}, {&__pyx_n_s_pyx_getbuffer, __pyx_k_pyx_getbuffer, sizeof(__pyx_k_pyx_getbuffer), 0, 0, 1, 1}, {&__pyx_n_s_pyx_vtable, __pyx_k_pyx_vtable, sizeof(__pyx_k_pyx_vtable), 0, 0, 1, 1}, {&__pyx_n_s_quality_estimate, __pyx_k_quality_estimate, sizeof(__pyx_k_quality_estimate), 0, 0, 1, 1}, 
{&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1}, {&__pyx_n_s_real_o, __pyx_k_real_o, sizeof(__pyx_k_real_o), 0, 0, 1, 1}, {&__pyx_n_s_reciprocal_rank, __pyx_k_reciprocal_rank, sizeof(__pyx_k_reciprocal_rank), 0, 0, 1, 1}, {&__pyx_n_s_return_probs, __pyx_k_return_probs, sizeof(__pyx_k_return_probs), 0, 0, 1, 1}, {&__pyx_n_s_rrs, __pyx_k_rrs, sizeof(__pyx_k_rrs), 0, 0, 1, 1}, {&__pyx_n_s_s, __pyx_k_s, sizeof(__pyx_k_s), 0, 0, 1, 1}, {&__pyx_n_s_shape, __pyx_k_shape, sizeof(__pyx_k_shape), 0, 0, 1, 1}, {&__pyx_n_s_size, __pyx_k_size, sizeof(__pyx_k_size), 0, 0, 1, 1}, {&__pyx_n_s_start, __pyx_k_start, sizeof(__pyx_k_start), 0, 0, 1, 1}, {&__pyx_n_s_step, __pyx_k_step, sizeof(__pyx_k_step), 0, 0, 1, 1}, {&__pyx_n_s_stop, __pyx_k_stop, sizeof(__pyx_k_stop), 0, 0, 1, 1}, {&__pyx_kp_s_strided_and_direct, __pyx_k_strided_and_direct, sizeof(__pyx_k_strided_and_direct), 0, 0, 1, 0}, {&__pyx_kp_s_strided_and_direct_or_indirect, __pyx_k_strided_and_direct_or_indirect, sizeof(__pyx_k_strided_and_direct_or_indirect), 0, 0, 1, 0}, {&__pyx_kp_s_strided_and_indirect, __pyx_k_strided_and_indirect, sizeof(__pyx_k_strided_and_indirect), 0, 0, 1, 0}, {&__pyx_n_s_struct, __pyx_k_struct, sizeof(__pyx_k_struct), 0, 0, 1, 1}, {&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1}, {&__pyx_n_s_tribeflow__eval, __pyx_k_tribeflow__eval, sizeof(__pyx_k_tribeflow__eval), 0, 0, 1, 1}, {&__pyx_kp_s_unable_to_allocate_array_data, __pyx_k_unable_to_allocate_array_data, sizeof(__pyx_k_unable_to_allocate_array_data), 0, 0, 1, 0}, {&__pyx_kp_s_unable_to_allocate_shape_and_str, __pyx_k_unable_to_allocate_shape_and_str, sizeof(__pyx_k_unable_to_allocate_shape_and_str), 0, 0, 1, 0}, {&__pyx_n_s_unpack, __pyx_k_unpack, sizeof(__pyx_k_unpack), 0, 0, 1, 1}, {&__pyx_n_s_xrange, __pyx_k_xrange, sizeof(__pyx_k_xrange), 0, 0, 1, 1}, {&__pyx_n_s_z, __pyx_k_z, sizeof(__pyx_k_z), 0, 0, 1, 1}, {&__pyx_n_s_zeros, __pyx_k_zeros, sizeof(__pyx_k_zeros), 0, 0, 1, 1}, {0, 0, 0, 0, 0, 0, 0} }; static int __Pyx_InitCachedBuiltins(void) { #if PY_MAJOR_VERSION >= 3 __pyx_builtin_xrange = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_xrange) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 33; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #else __pyx_builtin_xrange = __Pyx_GetBuiltinName(__pyx_n_s_xrange); if (!__pyx_builtin_xrange) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 33; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif __pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 129; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_builtin_MemoryError = __Pyx_GetBuiltinName(__pyx_n_s_MemoryError); if (!__pyx_builtin_MemoryError) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 144; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_builtin_enumerate = __Pyx_GetBuiltinName(__pyx_n_s_enumerate); if (!__pyx_builtin_enumerate) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 147; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 176; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_builtin_Ellipsis = __Pyx_GetBuiltinName(__pyx_n_s_Ellipsis); if (!__pyx_builtin_Ellipsis) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 359; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_builtin_TypeError = __Pyx_GetBuiltinName(__pyx_n_s_TypeError); if (!__pyx_builtin_TypeError) {__pyx_filename = 
__pyx_f[1]; __pyx_lineno = 388; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_builtin_id = __Pyx_GetBuiltinName(__pyx_n_s_id); if (!__pyx_builtin_id) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 571; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_builtin_IndexError = __Pyx_GetBuiltinName(__pyx_n_s_IndexError); if (!__pyx_builtin_IndexError) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 790; __pyx_clineno = __LINE__; goto __pyx_L1_error;} return 0; __pyx_L1_error:; return -1; } static int __Pyx_InitCachedConstants(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0); /* "View.MemoryView":129 * * if not self.ndim: * raise ValueError("Empty shape tuple for cython.array") # <<<<<<<<<<<<<< * * if itemsize <= 0: */ __pyx_tuple_ = PyTuple_Pack(1, __pyx_kp_s_Empty_shape_tuple_for_cython_arr); if (unlikely(!__pyx_tuple_)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 129; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple_); __Pyx_GIVEREF(__pyx_tuple_); /* "View.MemoryView":132 * * if itemsize <= 0: * raise ValueError("itemsize <= 0 for cython.array") # <<<<<<<<<<<<<< * * if not isinstance(format, bytes): */ __pyx_tuple__2 = PyTuple_Pack(1, __pyx_kp_s_itemsize_0_for_cython_array); if (unlikely(!__pyx_tuple__2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 132; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__2); __Pyx_GIVEREF(__pyx_tuple__2); /* "View.MemoryView":135 * * if not isinstance(format, bytes): * format = format.encode('ASCII') # <<<<<<<<<<<<<< * self._format = format # keep a reference to the byte string * self.format = self._format */ __pyx_tuple__3 = PyTuple_Pack(1, __pyx_n_s_ASCII); if (unlikely(!__pyx_tuple__3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 135; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__3); __Pyx_GIVEREF(__pyx_tuple__3); /* "View.MemoryView":144 * * if not self._shape: * raise MemoryError("unable to allocate shape and strides.") # <<<<<<<<<<<<<< * * */ __pyx_tuple__4 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_shape_and_str); if (unlikely(!__pyx_tuple__4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 144; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__4); __Pyx_GIVEREF(__pyx_tuple__4); /* "View.MemoryView":172 * self.data = <char *>malloc(self.len) * if not self.data: * raise MemoryError("unable to allocate array data.") # <<<<<<<<<<<<<< * * if self.dtype_is_object: */ __pyx_tuple__5 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_array_data); if (unlikely(!__pyx_tuple__5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 172; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__5); __Pyx_GIVEREF(__pyx_tuple__5); /* "View.MemoryView":188 * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") # <<<<<<<<<<<<<< * info.buf = self.data * info.len = self.len */ __pyx_tuple__6 = PyTuple_Pack(1, __pyx_kp_s_Can_only_create_a_buffer_that_is); if (unlikely(!__pyx_tuple__6)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 188; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__6); __Pyx_GIVEREF(__pyx_tuple__6); /* "View.MemoryView":447 * result = struct.unpack(self.view.format, bytesitem) * except struct.error: * raise ValueError("Unable to convert item to object") # <<<<<<<<<<<<<< * else: * if len(self.view.format) == 1: */ __pyx_tuple__7 = PyTuple_Pack(1, 
__pyx_kp_s_Unable_to_convert_item_to_object); if (unlikely(!__pyx_tuple__7)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 447; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__7); __Pyx_GIVEREF(__pyx_tuple__7); /* "View.MemoryView":523 * if self.view.strides == NULL: * * raise ValueError("Buffer view does not expose strides") # <<<<<<<<<<<<<< * * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) */ __pyx_tuple__8 = PyTuple_Pack(1, __pyx_kp_s_Buffer_view_does_not_expose_stri); if (unlikely(!__pyx_tuple__8)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 523; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__8); __Pyx_GIVEREF(__pyx_tuple__8); /* "View.MemoryView":531 * def __get__(self): * if self.view.suboffsets == NULL: * return (-1,) * self.view.ndim # <<<<<<<<<<<<<< * * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) */ __pyx_tuple__9 = PyTuple_New(1); if (unlikely(!__pyx_tuple__9)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 531; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__9); __Pyx_INCREF(__pyx_int_neg_1); __Pyx_GIVEREF(__pyx_int_neg_1); PyTuple_SET_ITEM(__pyx_tuple__9, 0, __pyx_int_neg_1); __Pyx_GIVEREF(__pyx_tuple__9); /* "View.MemoryView":640 * if item is Ellipsis: * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) # <<<<<<<<<<<<<< * seen_ellipsis = True * else: */ __pyx_slice__10 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__10)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 640; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_slice__10); __Pyx_GIVEREF(__pyx_slice__10); /* "View.MemoryView":643 * seen_ellipsis = True * else: * result.append(slice(None)) # <<<<<<<<<<<<<< * have_slices = True * else: */ __pyx_slice__11 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__11)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 643; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_slice__11); __Pyx_GIVEREF(__pyx_slice__11); /* "View.MemoryView":654 * nslices = ndim - len(result) * if nslices: * result.extend([slice(None)] * nslices) # <<<<<<<<<<<<<< * * return have_slices or nslices, tuple(result) */ __pyx_slice__12 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__12)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 654; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_slice__12); __Pyx_GIVEREF(__pyx_slice__12); /* "View.MemoryView":661 * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: * raise ValueError("Indirect dimensions not supported") # <<<<<<<<<<<<<< * * */ __pyx_tuple__13 = PyTuple_Pack(1, __pyx_kp_s_Indirect_dimensions_not_supporte); if (unlikely(!__pyx_tuple__13)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 661; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__13); __Pyx_GIVEREF(__pyx_tuple__13); /* "tribeflow/_eval.pyx":21 * double log(double) nogil * * def quality_estimate(double[:,::1] Dts, int[:,::1] Trace, \ # <<<<<<<<<<<<<< * StampLists previous_stamps, int[:,::1] Count_zh, int[:,::1] Count_sz, \ * int[::1] count_h, int[::1] count_z, double alpha_zh, double beta_zs, \ */ __pyx_tuple__14 = PyTuple_Pack(20, __pyx_n_s_Dts, __pyx_n_s_Trace, __pyx_n_s_previous_stamps, __pyx_n_s_Count_zh, __pyx_n_s_Count_sz, __pyx_n_s_count_h, __pyx_n_s_count_z, __pyx_n_s_alpha_zh, __pyx_n_s_beta_zs, __pyx_n_s_ll_per_z, __pyx_n_s_idx, __pyx_n_s_kernel, __pyx_n_s_nz, __pyx_n_s_ns, __pyx_n_s_i, 
__pyx_n_s_h, __pyx_n_s_z, __pyx_n_s_o, __pyx_n_s_dt, __pyx_n_s_j); if (unlikely(!__pyx_tuple__14)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 21; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__14); __Pyx_GIVEREF(__pyx_tuple__14); __pyx_codeobj__15 = (PyObject*)__Pyx_PyCode_New(12, 0, 20, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__14, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_home_mohame11_outlierDetection, __pyx_n_s_quality_estimate, 21, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__15)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 21; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "tribeflow/_eval.pyx":48 * log(kernel.pdf(dt, z, previous_stamps)) * * def reciprocal_rank(double[:, ::1] Dts, int[:, ::1] HOs, \ # <<<<<<<<<<<<<< * StampLists previous_stamps, double[:, ::1] Theta_zh, \ * double[:, ::1] Psi_sz, int[::1] count_z, Kernel kernel, */ __pyx_tuple__16 = PyTuple_Pack(23, __pyx_n_s_Dts, __pyx_n_s_HOs, __pyx_n_s_previous_stamps, __pyx_n_s_Theta_zh, __pyx_n_s_Psi_sz, __pyx_n_s_count_z, __pyx_n_s_kernel, __pyx_n_s_return_probs, __pyx_n_s_dt, __pyx_n_s_h, __pyx_n_s_s, __pyx_n_s_real_o, __pyx_n_s_candidate_o, __pyx_n_s_last_o, __pyx_n_s_z, __pyx_n_s_ns, __pyx_n_s_mem, __pyx_n_s_mem_factor, __pyx_n_s_p, __pyx_n_s_rrs, __pyx_n_s_predictions, __pyx_n_s_i, __pyx_n_s_j); if (unlikely(!__pyx_tuple__16)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 48; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__16); __Pyx_GIVEREF(__pyx_tuple__16); __pyx_codeobj__17 = (PyObject*)__Pyx_PyCode_New(8, 0, 23, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__16, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_home_mohame11_outlierDetection, __pyx_n_s_reciprocal_rank, 48, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__17)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 48; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "View.MemoryView":278 * return self.name * * cdef generic = Enum("<strided and direct or indirect>") # <<<<<<<<<<<<<< * cdef strided = Enum("<strided and direct>") # default * cdef indirect = Enum("<strided and indirect>") */ __pyx_tuple__18 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct_or_indirect); if (unlikely(!__pyx_tuple__18)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 278; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__18); __Pyx_GIVEREF(__pyx_tuple__18); /* "View.MemoryView":279 * * cdef generic = Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided and direct>") # default # <<<<<<<<<<<<<< * cdef indirect = Enum("<strided and indirect>") * */ __pyx_tuple__19 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct); if (unlikely(!__pyx_tuple__19)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 279; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__19); __Pyx_GIVEREF(__pyx_tuple__19); /* "View.MemoryView":280 * cdef generic = Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided and direct>") # default * cdef indirect = Enum("<strided and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_tuple__20 = PyTuple_Pack(1, __pyx_kp_s_strided_and_indirect); if (unlikely(!__pyx_tuple__20)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 280; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__20); __Pyx_GIVEREF(__pyx_tuple__20); /* "View.MemoryView":283 * * * cdef contiguous = Enum("<contiguous and direct>") # <<<<<<<<<<<<<< * cdef indirect_contiguous = Enum("<contiguous and 
indirect>") * */ __pyx_tuple__21 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_direct); if (unlikely(!__pyx_tuple__21)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 283; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__21); __Pyx_GIVEREF(__pyx_tuple__21); /* "View.MemoryView":284 * * cdef contiguous = Enum("<contiguous and direct>") * cdef indirect_contiguous = Enum("<contiguous and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_tuple__22 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_indirect); if (unlikely(!__pyx_tuple__22)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 284; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__22); __Pyx_GIVEREF(__pyx_tuple__22); __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; __Pyx_RefNannyFinishContext(); return -1; } static int __Pyx_InitGlobals(void) { /* InitThreads.init */ #ifdef WITH_THREAD PyEval_InitThreads(); #endif if (unlikely(PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (__Pyx_InitStrings(__pyx_string_tab) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; __pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_int_1 = PyInt_FromLong(1); if (unlikely(!__pyx_int_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_int_neg_1 = PyInt_FromLong(-1); if (unlikely(!__pyx_int_neg_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} return 0; __pyx_L1_error:; return -1; } #if PY_MAJOR_VERSION < 3 PyMODINIT_FUNC init_eval(void); /*proto*/ PyMODINIT_FUNC init_eval(void) #else PyMODINIT_FUNC PyInit__eval(void); /*proto*/ PyMODINIT_FUNC PyInit__eval(void) #endif { PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannyDeclarations #if CYTHON_REFNANNY __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); if (!__Pyx_RefNanny) { PyErr_Clear(); __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); if (!__Pyx_RefNanny) Py_FatalError("failed to import 'refnanny' module"); } #endif __Pyx_RefNannySetupContext("PyMODINIT_FUNC PyInit__eval(void)", 0); if (__Pyx_check_binary_version() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #ifdef __Pyx_CyFunction_USED if (__pyx_CyFunction_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif #ifdef __Pyx_FusedFunction_USED if (__pyx_FusedFunction_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif #ifdef __Pyx_Coroutine_USED if (__pyx_Coroutine_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif #ifdef __Pyx_Generator_USED if (__pyx_Generator_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif #ifdef 
__Pyx_StopAsyncIteration_USED if (__pyx_StopAsyncIteration_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif /*--- Library function declarations ---*/ /*--- Threads initialization code ---*/ #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS #ifdef WITH_THREAD /* Python build with threading support? */ PyEval_InitThreads(); #endif #endif /*--- Module creation code ---*/ #if PY_MAJOR_VERSION < 3 __pyx_m = Py_InitModule4("_eval", __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m); #else __pyx_m = PyModule_Create(&__pyx_moduledef); #endif if (unlikely(!__pyx_m)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} Py_INCREF(__pyx_d); __pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #if CYTHON_COMPILING_IN_PYPY Py_INCREF(__pyx_b); #endif if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; /*--- Initialize various global constants etc. ---*/ if (__Pyx_InitGlobals() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT) if (__Pyx_init_sys_getdefaultencoding_params() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif if (__pyx_module_is_main_tribeflow___eval) { if (PyObject_SetAttrString(__pyx_m, "__name__", __pyx_n_s_main) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } #if PY_MAJOR_VERSION >= 3 { PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (!PyDict_GetItemString(modules, "tribeflow._eval")) { if (unlikely(PyDict_SetItemString(modules, "tribeflow._eval", __pyx_m) < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } } #endif /*--- Builtin init code ---*/ if (__Pyx_InitCachedBuiltins() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /*--- Constants init code ---*/ if (__Pyx_InitCachedConstants() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /*--- Global init code ---*/ generic = Py_None; Py_INCREF(Py_None); strided = Py_None; Py_INCREF(Py_None); indirect = Py_None; Py_INCREF(Py_None); contiguous = Py_None; Py_INCREF(Py_None); indirect_contiguous = Py_None; Py_INCREF(Py_None); /*--- Variable export code ---*/ /*--- Function export code ---*/ /*--- Type init code ---*/ if (PyType_Ready(&__pyx_type___pyx_array) < 0) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 101; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_type___pyx_array.tp_print = 0; __pyx_array_type = &__pyx_type___pyx_array; if (PyType_Ready(&__pyx_type___pyx_MemviewEnum) < 0) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 271; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_type___pyx_MemviewEnum.tp_print = 0; __pyx_MemviewEnum_type = &__pyx_type___pyx_MemviewEnum; 
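  /*
   * [Editorial note, not Cython output] Each extension type in this module is
   * registered with the same two-step pattern seen above and below:
   * PyType_Ready() resolves inheritance and fills the slot table, then for
   * cdef classes with C-level methods a vtable of raw function pointers is
   * stored in tp_dict via __Pyx_SetVtable(), so cimporting modules can fetch
   * it without Python-level attribute lookup. A hedged sketch with
   * hypothetical MyType/my_vtable/my_type_ptr names, #if 0-guarded so it is
   * illustration only:
   */
  #if 0
  if (PyType_Ready(&MyType) < 0) goto __pyx_L1_error;  /* resolve bases and slots */
  MyType.tp_print = 0;                                 /* clear the Py2-era slot  */
  if (__Pyx_SetVtable(MyType.tp_dict, &my_vtable) < 0) goto __pyx_L1_error;
  my_type_ptr = &MyType;                               /* cache for fast C access */
  #endif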
__pyx_vtabptr_memoryview = &__pyx_vtable_memoryview; __pyx_vtable_memoryview.get_item_pointer = (char *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_get_item_pointer; __pyx_vtable_memoryview.is_slice = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_is_slice; __pyx_vtable_memoryview.setitem_slice_assignment = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_slice_assignment; __pyx_vtable_memoryview.setitem_slice_assign_scalar = (PyObject *(*)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_setitem_slice_assign_scalar; __pyx_vtable_memoryview.setitem_indexed = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_indexed; __pyx_vtable_memoryview.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryview_convert_item_to_object; __pyx_vtable_memoryview.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryview_assign_item_from_object; if (PyType_Ready(&__pyx_type___pyx_memoryview) < 0) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 304; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_type___pyx_memoryview.tp_print = 0; if (__Pyx_SetVtable(__pyx_type___pyx_memoryview.tp_dict, __pyx_vtabptr_memoryview) < 0) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 304; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_memoryview_type = &__pyx_type___pyx_memoryview; __pyx_vtabptr__memoryviewslice = &__pyx_vtable__memoryviewslice; __pyx_vtable__memoryviewslice.__pyx_base = *__pyx_vtabptr_memoryview; __pyx_vtable__memoryviewslice.__pyx_base.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryviewslice_convert_item_to_object; __pyx_vtable__memoryviewslice.__pyx_base.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryviewslice_assign_item_from_object; __pyx_type___pyx_memoryviewslice.tp_base = __pyx_memoryview_type; if (PyType_Ready(&__pyx_type___pyx_memoryviewslice) < 0) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 923; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_type___pyx_memoryviewslice.tp_print = 0; if (__Pyx_SetVtable(__pyx_type___pyx_memoryviewslice.tp_dict, __pyx_vtabptr__memoryviewslice) < 0) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 923; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_memoryviewslice_type = &__pyx_type___pyx_memoryviewslice; /*--- Type import code ---*/ __pyx_ptype_7cpython_4type_type = __Pyx_ImportType(__Pyx_BUILTIN_MODULE_NAME, "type", #if CYTHON_COMPILING_IN_PYPY sizeof(PyTypeObject), #else sizeof(PyHeapTypeObject), #endif 0); if (unlikely(!__pyx_ptype_7cpython_4type_type)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 9; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_7cpython_4bool_bool = __Pyx_ImportType(__Pyx_BUILTIN_MODULE_NAME, "bool", sizeof(PyBoolObject), 0); if (unlikely(!__pyx_ptype_7cpython_4bool_bool)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 8; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_7cpython_7complex_complex = __Pyx_ImportType(__Pyx_BUILTIN_MODULE_NAME, "complex", sizeof(PyComplexObject), 0); if (unlikely(!__pyx_ptype_7cpython_7complex_complex)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_9tribeflow_13mycollections_11stamp_lists_StampLists = __Pyx_ImportType("tribeflow.mycollections.stamp_lists", 
"StampLists", sizeof(struct __pyx_obj_9tribeflow_13mycollections_11stamp_lists_StampLists), 1); if (unlikely(!__pyx_ptype_9tribeflow_13mycollections_11stamp_lists_StampLists)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 10; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_vtabptr_9tribeflow_13mycollections_11stamp_lists_StampLists = (struct __pyx_vtabstruct_9tribeflow_13mycollections_11stamp_lists_StampLists*)__Pyx_GetVtable(__pyx_ptype_9tribeflow_13mycollections_11stamp_lists_StampLists->tp_dict); if (unlikely(!__pyx_vtabptr_9tribeflow_13mycollections_11stamp_lists_StampLists)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 10; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_9tribeflow_7kernels_4base_Kernel = __Pyx_ImportType("tribeflow.kernels.base", "Kernel", sizeof(struct __pyx_obj_9tribeflow_7kernels_4base_Kernel), 1); if (unlikely(!__pyx_ptype_9tribeflow_7kernels_4base_Kernel)) {__pyx_filename = __pyx_f[6]; __pyx_lineno = 12; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_vtabptr_9tribeflow_7kernels_4base_Kernel = (struct __pyx_vtabstruct_9tribeflow_7kernels_4base_Kernel*)__Pyx_GetVtable(__pyx_ptype_9tribeflow_7kernels_4base_Kernel->tp_dict); if (unlikely(!__pyx_vtabptr_9tribeflow_7kernels_4base_Kernel)) {__pyx_filename = __pyx_f[6]; __pyx_lineno = 12; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /*--- Variable import code ---*/ /*--- Function import code ---*/ __pyx_t_1 = __Pyx_ImportModule("tribeflow._learn"); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (__Pyx_ImportFunction(__pyx_t_1, "dir_posterior", (void (**)(void))&__pyx_f_9tribeflow_6_learn_dir_posterior, "double (double, double, double, double)") < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} Py_DECREF(__pyx_t_1); __pyx_t_1 = 0; /*--- Execution code ---*/ #if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED) if (__Pyx_patch_abc() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif /* "tribeflow/_eval.pyx":16 * from tribeflow.mycollections.stamp_lists cimport StampLists * * import numpy as np # <<<<<<<<<<<<<< * * cdef extern from 'math.h': */ __pyx_t_2 = __Pyx_Import(__pyx_n_s_numpy, 0, -1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 16; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_np, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 16; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "tribeflow/_eval.pyx":21 * double log(double) nogil * * def quality_estimate(double[:,::1] Dts, int[:,::1] Trace, \ # <<<<<<<<<<<<<< * StampLists previous_stamps, int[:,::1] Count_zh, int[:,::1] Count_sz, \ * int[::1] count_h, int[::1] count_z, double alpha_zh, double beta_zs, \ */ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_9tribeflow_5_eval_1quality_estimate, NULL, __pyx_n_s_tribeflow__eval); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 21; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_quality_estimate, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 21; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "tribeflow/_eval.pyx":48 * log(kernel.pdf(dt, z, previous_stamps)) * * def reciprocal_rank(double[:, ::1] Dts, int[:, ::1] HOs, \ # 
<<<<<<<<<<<<<< * StampLists previous_stamps, double[:, ::1] Theta_zh, \ * double[:, ::1] Psi_sz, int[::1] count_z, Kernel kernel, */ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_9tribeflow_5_eval_3reciprocal_rank, NULL, __pyx_n_s_tribeflow__eval); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 48; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_reciprocal_rank, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 48; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "tribeflow/_eval.pyx":1 * #-*- coding: utf8 # <<<<<<<<<<<<<< * # cython: boundscheck = False * # cython: cdivision = True */ __pyx_t_2 = PyDict_New(); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":205 * info.obj = self * * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< * * def __dealloc__(array self): */ __pyx_t_2 = __pyx_capsule_create(((void *)(&__pyx_array_getbuffer)), __pyx_k_getbuffer_obj_view_flags); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 205; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_array_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 205; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; PyType_Modified(__pyx_array_type); /* "View.MemoryView":278 * return self.name * * cdef generic = Enum("<strided and direct or indirect>") # <<<<<<<<<<<<<< * cdef strided = Enum("<strided and direct>") # default * cdef indirect = Enum("<strided and indirect>") */ __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__18, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 278; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_XGOTREF(generic); __Pyx_DECREF_SET(generic, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":279 * * cdef generic = Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided and direct>") # default # <<<<<<<<<<<<<< * cdef indirect = Enum("<strided and indirect>") * */ __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__19, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 279; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_XGOTREF(strided); __Pyx_DECREF_SET(strided, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":280 * cdef generic = Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided and direct>") # default * cdef indirect = Enum("<strided and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__20, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 280; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_XGOTREF(indirect); __Pyx_DECREF_SET(indirect, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":283 * * * cdef contiguous = 
Enum("<contiguous and direct>") # <<<<<<<<<<<<<< * cdef indirect_contiguous = Enum("<contiguous and indirect>") * */ __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__21, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 283; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_XGOTREF(contiguous); __Pyx_DECREF_SET(contiguous, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":284 * * cdef contiguous = Enum("<contiguous and direct>") * cdef indirect_contiguous = Enum("<contiguous and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__22, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 284; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_XGOTREF(indirect_contiguous); __Pyx_DECREF_SET(indirect_contiguous, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":498 * info.obj = self * * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< * * */ __pyx_t_2 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), __pyx_k_getbuffer_obj_view_flags); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 498; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_memoryview_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 498; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; PyType_Modified(__pyx_memoryview_type); /* "View.MemoryView":954 * return self.from_object * * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< * * */ __pyx_t_2 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), __pyx_k_getbuffer_obj_view_flags); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 954; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_memoryviewslice_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 954; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; PyType_Modified(__pyx_memoryviewslice_type); /* "View.MemoryView":1364 * * @cname('__pyx_memoryview__slice_assign_scalar') * cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * size_t itemsize, void *item) nogil: */ /*--- Wrapped vars code ---*/ goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); if (__pyx_m) { if (__pyx_d) { __Pyx_AddTraceback("init tribeflow._eval", __pyx_clineno, __pyx_lineno, __pyx_filename); } Py_DECREF(__pyx_m); __pyx_m = 0; } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_ImportError, "init tribeflow._eval"); } __pyx_L0:; __Pyx_RefNannyFinishContext(); #if PY_MAJOR_VERSION < 3 return; #else return __pyx_m; #endif } /* --- Runtime support code --- */ #if CYTHON_REFNANNY static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { PyObject *m = NULL, *p = NULL; void *r = NULL; m = PyImport_ImportModule((char *)modname); if (!m) goto end; p = PyObject_GetAttrString(m, (char *)"RefNannyAPI"); if (!p) goto end; r = PyLong_AsVoidPtr(p); end: Py_XDECREF(p); Py_XDECREF(m); return (__Pyx_RefNannyAPIStruct *)r; } #endif static PyObject 
*__Pyx_GetBuiltinName(PyObject *name) { PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name); if (unlikely(!result)) { PyErr_Format(PyExc_NameError, #if PY_MAJOR_VERSION >= 3 "name '%U' is not defined", name); #else "name '%.200s' is not defined", PyString_AS_STRING(name)); #endif } return result; } static void __Pyx_RaiseArgtupleInvalid( const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found) { Py_ssize_t num_expected; const char *more_or_less; if (num_found < num_min) { num_expected = num_min; more_or_less = "at least"; } else { num_expected = num_max; more_or_less = "at most"; } if (exact) { more_or_less = "exactly"; } PyErr_Format(PyExc_TypeError, "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)", func_name, more_or_less, num_expected, (num_expected == 1) ? "" : "s", num_found); } static void __Pyx_RaiseDoubleKeywordsError( const char* func_name, PyObject* kw_name) { PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION >= 3 "%s() got multiple values for keyword argument '%U'", func_name, kw_name); #else "%s() got multiple values for keyword argument '%s'", func_name, PyString_AsString(kw_name)); #endif } static int __Pyx_ParseOptionalKeywords( PyObject *kwds, PyObject **argnames[], PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, const char* function_name) { PyObject *key = 0, *value = 0; Py_ssize_t pos = 0; PyObject*** name; PyObject*** first_kw_arg = argnames + num_pos_args; while (PyDict_Next(kwds, &pos, &key, &value)) { name = first_kw_arg; while (*name && (**name != key)) name++; if (*name) { values[name-argnames] = value; continue; } name = first_kw_arg; #if PY_MAJOR_VERSION < 3 if (likely(PyString_CheckExact(key)) || likely(PyString_Check(key))) { while (*name) { if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key)) && _PyString_Eq(**name, key)) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { if ((**argname == key) || ( (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key)) && _PyString_Eq(**argname, key))) { goto arg_passed_twice; } argname++; } } } else #endif if (likely(PyUnicode_Check(key))) { while (*name) { int cmp = (**name == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (PyUnicode_GET_SIZE(**name) != PyUnicode_GET_SIZE(key)) ? 1 : #endif PyUnicode_Compare(**name, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { int cmp = (**argname == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (PyUnicode_GET_SIZE(**argname) != PyUnicode_GET_SIZE(key)) ? 
1 : #endif PyUnicode_Compare(**argname, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) goto arg_passed_twice; argname++; } } } else goto invalid_keyword_type; if (kwds2) { if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; } else { goto invalid_keyword; } } return 0; arg_passed_twice: __Pyx_RaiseDoubleKeywordsError(function_name, key); goto bad; invalid_keyword_type: PyErr_Format(PyExc_TypeError, "%.200s() keywords must be strings", function_name); goto bad; invalid_keyword: PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION < 3 "%.200s() got an unexpected keyword argument '%.200s'", function_name, PyString_AsString(key)); #else "%s() got an unexpected keyword argument '%U'", function_name, key); #endif bad: return -1; } static void __Pyx_RaiseArgumentTypeInvalid(const char* name, PyObject *obj, PyTypeObject *type) { PyErr_Format(PyExc_TypeError, "Argument '%.200s' has incorrect type (expected %.200s, got %.200s)", name, type->tp_name, Py_TYPE(obj)->tp_name); } static CYTHON_INLINE int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed, const char *name, int exact) { if (unlikely(!type)) { PyErr_SetString(PyExc_SystemError, "Missing type object"); return 0; } if (none_allowed && obj == Py_None) return 1; else if (exact) { if (likely(Py_TYPE(obj) == type)) return 1; #if PY_MAJOR_VERSION == 2 else if ((type == &PyBaseString_Type) && likely(__Pyx_PyBaseString_CheckExact(obj))) return 1; #endif } else { if (likely(PyObject_TypeCheck(obj, type))) return 1; } __Pyx_RaiseArgumentTypeInvalid(name, obj, type); return 0; } static CYTHON_INLINE int __Pyx_IsLittleEndian(void) { unsigned int n = 1; return *(unsigned char*)(&n) != 0; } static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, __Pyx_BufFmt_StackElem* stack, __Pyx_TypeInfo* type) { stack[0].field = &ctx->root; stack[0].parent_offset = 0; ctx->root.type = type; ctx->root.name = "buffer dtype"; ctx->root.offset = 0; ctx->head = stack; ctx->head->field = &ctx->root; ctx->fmt_offset = 0; ctx->head->parent_offset = 0; ctx->new_packmode = '@'; ctx->enc_packmode = '@'; ctx->new_count = 1; ctx->enc_count = 0; ctx->enc_type = 0; ctx->is_complex = 0; ctx->is_valid_array = 0; ctx->struct_alignment = 0; while (type->typegroup == 'S') { ++ctx->head; ctx->head->field = type->fields; ctx->head->parent_offset = 0; type = type->fields->type; } } static int __Pyx_BufFmt_ParseNumber(const char** ts) { int count; const char* t = *ts; if (*t < '0' || *t > '9') { return -1; } else { count = *t++ - '0'; while (*t >= '0' && *t < '9') { count *= 10; count += *t++ - '0'; } } *ts = t; return count; } static int __Pyx_BufFmt_ExpectNumber(const char **ts) { int number = __Pyx_BufFmt_ParseNumber(ts); if (number == -1) PyErr_Format(PyExc_ValueError,\ "Does not understand character buffer dtype format string ('%c')", **ts); return number; } static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) { PyErr_Format(PyExc_ValueError, "Unexpected format string character: '%c'", ch); } static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) { switch (ch) { case 'c': return "'char'"; case 'b': return "'signed char'"; case 'B': return "'unsigned char'"; case 'h': return "'short'"; case 'H': return "'unsigned short'"; case 'i': return "'int'"; case 'I': return "'unsigned int'"; case 'l': return "'long'"; case 'L': return "'unsigned long'"; case 'q': return "'long long'"; case 'Q': return "'unsigned long long'"; case 'f': return (is_complex ? "'complex float'" : "'float'"); case 'd': return (is_complex ? 
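/* The characters handled here follow the struct-module / PEP 3118
   conventions: lower-case integer codes are signed, their upper-case twins
   unsigned, and 'f'/'d'/'g' are float, double and long double. A leading 'Z'
   in the format string (folded into the is_complex flag by the parser below)
   promotes a float code to its complex variant, so an exporter format of
   "Zd" describes a C99 double complex item, e.g. (illustrative only):

       #include <complex.h>
       double complex z = 1.0 + 2.0 * I;   // itemsize 16, format "Zd"
*/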
"'complex double'" : "'double'"); case 'g': return (is_complex ? "'complex long double'" : "'long double'"); case 'T': return "a struct"; case 'O': return "Python object"; case 'P': return "a pointer"; case 's': case 'p': return "a string"; case 0: return "end"; default: return "unparseable format string"; } } static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return 2; case 'i': case 'I': case 'l': case 'L': return 4; case 'q': case 'Q': return 8; case 'f': return (is_complex ? 8 : 4); case 'd': return (is_complex ? 16 : 8); case 'g': { PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g').."); return 0; } case 'O': case 'P': return sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) { switch (ch) { case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(short); case 'i': case 'I': return sizeof(int); case 'l': case 'L': return sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(float) * (is_complex ? 2 : 1); case 'd': return sizeof(double) * (is_complex ? 2 : 1); case 'g': return sizeof(long double) * (is_complex ? 2 : 1); case 'O': case 'P': return sizeof(void*); default: { __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } } typedef struct { char c; short x; } __Pyx_st_short; typedef struct { char c; int x; } __Pyx_st_int; typedef struct { char c; long x; } __Pyx_st_long; typedef struct { char c; float x; } __Pyx_st_float; typedef struct { char c; double x; } __Pyx_st_double; typedef struct { char c; long double x; } __Pyx_st_longdouble; typedef struct { char c; void *x; } __Pyx_st_void_p; #ifdef HAVE_LONG_LONG typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong; #endif static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, CYTHON_UNUSED int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short); case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int); case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(__Pyx_st_float) - sizeof(float); case 'd': return sizeof(__Pyx_st_double) - sizeof(double); case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double); case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } /* These are for computing the padding at the end of the struct to align on the first member of the struct. This will probably the same as above, but we don't have any guarantees. 
*/ typedef struct { short x; char c; } __Pyx_pad_short; typedef struct { int x; char c; } __Pyx_pad_int; typedef struct { long x; char c; } __Pyx_pad_long; typedef struct { float x; char c; } __Pyx_pad_float; typedef struct { double x; char c; } __Pyx_pad_double; typedef struct { long double x; char c; } __Pyx_pad_longdouble; typedef struct { void *x; char c; } __Pyx_pad_void_p; #ifdef HAVE_LONG_LONG typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong; #endif static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, CYTHON_UNUSED int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(__Pyx_pad_short) - sizeof(short); case 'i': case 'I': return sizeof(__Pyx_pad_int) - sizeof(int); case 'l': case 'L': return sizeof(__Pyx_pad_long) - sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(__Pyx_pad_float) - sizeof(float); case 'd': return sizeof(__Pyx_pad_double) - sizeof(double); case 'g': return sizeof(__Pyx_pad_longdouble) - sizeof(long double); case 'P': case 'O': return sizeof(__Pyx_pad_void_p) - sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) { switch (ch) { case 'c': return 'H'; case 'b': case 'h': case 'i': case 'l': case 'q': case 's': case 'p': return 'I'; case 'B': case 'H': case 'I': case 'L': case 'Q': return 'U'; case 'f': case 'd': case 'g': return (is_complex ? 'C' : 'R'); case 'O': return 'O'; case 'P': return 'P'; default: { __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } } static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) { if (ctx->head == NULL || ctx->head->field == &ctx->root) { const char* expected; const char* quote; if (ctx->head == NULL) { expected = "end"; quote = ""; } else { expected = ctx->head->field->type->name; quote = "'"; } PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch, expected %s%s%s but got %s", quote, expected, quote, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex)); } else { __Pyx_StructField* field = ctx->head->field; __Pyx_StructField* parent = (ctx->head - 1)->field; PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'", field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex), parent->type->name, field->name); } } static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) { char group; size_t size, offset, arraysize = 1; if (ctx->enc_type == 0) return 0; if (ctx->head->field->type->arraysize[0]) { int i, ndim = 0; if (ctx->enc_type == 's' || ctx->enc_type == 'p') { ctx->is_valid_array = ctx->head->field->type->ndim == 1; ndim = 1; if (ctx->enc_count != ctx->head->field->type->arraysize[0]) { PyErr_Format(PyExc_ValueError, "Expected a dimension of size %zu, got %zu", ctx->head->field->type->arraysize[0], ctx->enc_count); return -1; } } if (!ctx->is_valid_array) { PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d", ctx->head->field->type->ndim, ndim); return -1; } for (i = 0; i < ctx->head->field->type->ndim; i++) { arraysize *= ctx->head->field->type->arraysize[i]; } ctx->is_valid_array = 0; ctx->enc_count = 1; } group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex); do { __Pyx_StructField* field = ctx->head->field; __Pyx_TypeInfo* type = field->type; if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') { size = 
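/* Two sizing regimes meet at this assignment: pack modes '@' and '^' use
   native sizes (plain sizeof), every other mode uses the struct module's
   fixed "standard" sizes. In native mode the required alignment of a type is
   recovered with the classic probe-struct trick behind the __Pyx_st_*
   typedefs above, sketched here with a hypothetical "probe" struct:

       struct probe { char c; double x; };
       size_t align_of_double = sizeof(struct probe) - sizeof(double);

   i.e. whatever padding the compiler inserts after the lone char is exactly
   the alignment requirement (8 for double on typical x86-64 ABIs, though the
   value is ABI-dependent). */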
__Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex); } else { size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex); } if (ctx->enc_packmode == '@') { size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex); size_t align_mod_offset; if (align_at == 0) return -1; align_mod_offset = ctx->fmt_offset % align_at; if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset; if (ctx->struct_alignment == 0) ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type, ctx->is_complex); } if (type->size != size || type->typegroup != group) { if (type->typegroup == 'C' && type->fields != NULL) { size_t parent_offset = ctx->head->parent_offset + field->offset; ++ctx->head; ctx->head->field = type->fields; ctx->head->parent_offset = parent_offset; continue; } if ((type->typegroup == 'H' || group == 'H') && type->size == size) { } else { __Pyx_BufFmt_RaiseExpected(ctx); return -1; } } offset = ctx->head->parent_offset + field->offset; if (ctx->fmt_offset != offset) { PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch; next field is at offset %" CYTHON_FORMAT_SSIZE_T "d but %" CYTHON_FORMAT_SSIZE_T "d expected", (Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset); return -1; } ctx->fmt_offset += size; if (arraysize) ctx->fmt_offset += (arraysize - 1) * size; --ctx->enc_count; while (1) { if (field == &ctx->root) { ctx->head = NULL; if (ctx->enc_count != 0) { __Pyx_BufFmt_RaiseExpected(ctx); return -1; } break; } ctx->head->field = ++field; if (field->type == NULL) { --ctx->head; field = ctx->head->field; continue; } else if (field->type->typegroup == 'S') { size_t parent_offset = ctx->head->parent_offset + field->offset; if (field->type->fields->type == NULL) continue; field = field->type->fields; ++ctx->head; ctx->head->field = field; ctx->head->parent_offset = parent_offset; break; } else { break; } } } while (ctx->enc_count); ctx->enc_type = 0; ctx->is_complex = 0; return 0; } static CYTHON_INLINE PyObject * __pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp) { const char *ts = *tsp; int i = 0, number; int ndim = ctx->head->field->type->ndim; ; ++ts; if (ctx->new_count != 1) { PyErr_SetString(PyExc_ValueError, "Cannot handle repeated arrays in format string"); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; while (*ts && *ts != ')') { switch (*ts) { case ' ': case '\f': case '\r': case '\n': case '\t': case '\v': continue; default: break; } number = __Pyx_BufFmt_ExpectNumber(&ts); if (number == -1) return NULL; if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i]) return PyErr_Format(PyExc_ValueError, "Expected a dimension of size %zu, got %d", ctx->head->field->type->arraysize[i], number); if (*ts != ',' && *ts != ')') return PyErr_Format(PyExc_ValueError, "Expected a comma in format string, got '%c'", *ts); if (*ts == ',') ts++; i++; } if (i != ndim) return PyErr_Format(PyExc_ValueError, "Expected %d dimension(s), got %d", ctx->head->field->type->ndim, i); if (!*ts) { PyErr_SetString(PyExc_ValueError, "Unexpected end of format string, expected ')'"); return NULL; } ctx->is_valid_array = 1; ctx->new_count = 1; *tsp = ++ts; return Py_None; } static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) { int got_Z = 0; while (1) { switch(*ts) { case 0: if (ctx->enc_type != 0 && ctx->head == NULL) { __Pyx_BufFmt_RaiseExpected(ctx); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; if (ctx->head != NULL) { 
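/* Reaching the terminating NUL of the format string is only legal once the
   expected dtype has been fully consumed: any pending element run is flushed
   through __Pyx_BufFmt_ProcessTypeChunk, and if the field stack (ctx->head)
   is still non-empty, the exporter's format ended short of the declared
   dtype and is reported as a dtype mismatch. For instance, a dtype of two
   doubles checked against a bare "d" would be rejected at this point (the
   pairing is an illustrative example, not a case from this module). */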
__Pyx_BufFmt_RaiseExpected(ctx); return NULL; } return ts; case ' ': case '\r': case '\n': ++ts; break; case '<': if (!__Pyx_IsLittleEndian()) { PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler"); return NULL; } ctx->new_packmode = '='; ++ts; break; case '>': case '!': if (__Pyx_IsLittleEndian()) { PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler"); return NULL; } ctx->new_packmode = '='; ++ts; break; case '=': case '@': case '^': ctx->new_packmode = *ts++; break; case 'T': { const char* ts_after_sub; size_t i, struct_count = ctx->new_count; size_t struct_alignment = ctx->struct_alignment; ctx->new_count = 1; ++ts; if (*ts != '{') { PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'"); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_type = 0; ctx->enc_count = 0; ctx->struct_alignment = 0; ++ts; ts_after_sub = ts; for (i = 0; i != struct_count; ++i) { ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts); if (!ts_after_sub) return NULL; } ts = ts_after_sub; if (struct_alignment) ctx->struct_alignment = struct_alignment; } break; case '}': { size_t alignment = ctx->struct_alignment; ++ts; if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_type = 0; if (alignment && ctx->fmt_offset % alignment) { ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment); } } return ts; case 'x': if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->fmt_offset += ctx->new_count; ctx->new_count = 1; ctx->enc_count = 0; ctx->enc_type = 0; ctx->enc_packmode = ctx->new_packmode; ++ts; break; case 'Z': got_Z = 1; ++ts; if (*ts != 'f' && *ts != 'd' && *ts != 'g') { __Pyx_BufFmt_RaiseUnexpectedChar('Z'); return NULL; } case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I': case 'l': case 'L': case 'q': case 'Q': case 'f': case 'd': case 'g': case 'O': case 'p': if (ctx->enc_type == *ts && got_Z == ctx->is_complex && ctx->enc_packmode == ctx->new_packmode) { ctx->enc_count += ctx->new_count; ctx->new_count = 1; got_Z = 0; ++ts; break; } case 's': if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_count = ctx->new_count; ctx->enc_packmode = ctx->new_packmode; ctx->enc_type = *ts; ctx->is_complex = got_Z; ++ts; ctx->new_count = 1; got_Z = 0; break; case ':': ++ts; while(*ts != ':') ++ts; ++ts; break; case '(': if (!__pyx_buffmt_parse_array(ctx, &ts)) return NULL; break; default: { int number = __Pyx_BufFmt_ExpectNumber(&ts); if (number == -1) return NULL; ctx->new_count = (size_t)number; } } } } static CYTHON_INLINE void __Pyx_ZeroBuffer(Py_buffer* buf) { buf->buf = NULL; buf->obj = NULL; buf->strides = __Pyx_zeros; buf->shape = __Pyx_zeros; buf->suboffsets = __Pyx_minusones; } static CYTHON_INLINE int __Pyx_GetBufferAndValidate( Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack) { if (obj == Py_None || obj == NULL) { __Pyx_ZeroBuffer(buf); return 0; } buf->buf = NULL; if (__Pyx_GetBuffer(obj, buf, flags) == -1) goto fail; if (buf->ndim != nd) { PyErr_Format(PyExc_ValueError, "Buffer has wrong number of dimensions (expected %d, got %d)", nd, buf->ndim); goto fail; } if (!cast) { __Pyx_BufFmt_Context ctx; __Pyx_BufFmt_Init(&ctx, stack, dtype); if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail; } if ((unsigned)buf->itemsize != dtype->size) { PyErr_Format(PyExc_ValueError, "Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "d byte%s) does not match 
size of '%s' (%" CYTHON_FORMAT_SSIZE_T "d byte%s)", buf->itemsize, (buf->itemsize > 1) ? "s" : "", dtype->name, (Py_ssize_t)dtype->size, (dtype->size > 1) ? "s" : ""); goto fail; } if (buf->suboffsets == NULL) buf->suboffsets = __Pyx_minusones; return 0; fail:; __Pyx_ZeroBuffer(buf); return -1; } static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info) { if (info->buf == NULL) return; if (info->suboffsets == __Pyx_minusones) info->suboffsets = NULL; __Pyx_ReleaseBuffer(info); } static int __Pyx_init_memviewslice(struct __pyx_memoryview_obj *memview, int ndim, __Pyx_memviewslice *memviewslice, int memview_is_new_reference) { __Pyx_RefNannyDeclarations int i, retval=-1; Py_buffer *buf = &memview->view; __Pyx_RefNannySetupContext("init_memviewslice", 0); if (!buf) { PyErr_SetString(PyExc_ValueError, "buf is NULL."); goto fail; } else if (memviewslice->memview || memviewslice->data) { PyErr_SetString(PyExc_ValueError, "memviewslice is already initialized!"); goto fail; } if (buf->strides) { for (i = 0; i < ndim; i++) { memviewslice->strides[i] = buf->strides[i]; } } else { Py_ssize_t stride = buf->itemsize; for (i = ndim - 1; i >= 0; i--) { memviewslice->strides[i] = stride; stride *= buf->shape[i]; } } for (i = 0; i < ndim; i++) { memviewslice->shape[i] = buf->shape[i]; if (buf->suboffsets) { memviewslice->suboffsets[i] = buf->suboffsets[i]; } else { memviewslice->suboffsets[i] = -1; } } memviewslice->memview = memview; memviewslice->data = (char *)buf->buf; if (__pyx_add_acquisition_count(memview) == 0 && !memview_is_new_reference) { Py_INCREF(memview); } retval = 0; goto no_fail; fail: memviewslice->memview = 0; memviewslice->data = 0; retval = -1; no_fail: __Pyx_RefNannyFinishContext(); return retval; } static CYTHON_INLINE void __pyx_fatalerror(const char *fmt, ...) 
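/* The helpers from here on maintain the memoryview "acquisition count":
   every live slice into a memoryview bumps the count, and the 0->1 / 1->0
   transitions also take or drop a Python reference on the owning object (see
   __Pyx_INC_MEMVIEW and __Pyx_XDEC_MEMVIEW just below). Roughly, for a
   hypothetical slice s:

       __Pyx_INC_MEMVIEW(&s, 1, __LINE__);  // first acquisition INCREFs the owner
       __Pyx_XDEC_MEMVIEW(&s, 1, __LINE__); // last release drops that reference

   An impossible count (negative, or a release below zero) indicates state
   corruption rather than a recoverable error, so it is funnelled into
   Py_FatalError through this vsnprintf wrapper instead of an exception. */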
{ va_list vargs; char msg[200]; #ifdef HAVE_STDARG_PROTOTYPES va_start(vargs, fmt); #else va_start(vargs); #endif vsnprintf(msg, 200, fmt, vargs); Py_FatalError(msg); va_end(vargs); } static CYTHON_INLINE int __pyx_add_acquisition_count_locked(__pyx_atomic_int *acquisition_count, PyThread_type_lock lock) { int result; PyThread_acquire_lock(lock, 1); result = (*acquisition_count)++; PyThread_release_lock(lock); return result; } static CYTHON_INLINE int __pyx_sub_acquisition_count_locked(__pyx_atomic_int *acquisition_count, PyThread_type_lock lock) { int result; PyThread_acquire_lock(lock, 1); result = (*acquisition_count)--; PyThread_release_lock(lock); return result; } static CYTHON_INLINE void __Pyx_INC_MEMVIEW(__Pyx_memviewslice *memslice, int have_gil, int lineno) { int first_time; struct __pyx_memoryview_obj *memview = memslice->memview; if (!memview || (PyObject *) memview == Py_None) return; if (__pyx_get_slice_count(memview) < 0) __pyx_fatalerror("Acquisition count is %d (line %d)", __pyx_get_slice_count(memview), lineno); first_time = __pyx_add_acquisition_count(memview) == 0; if (first_time) { if (have_gil) { Py_INCREF((PyObject *) memview); } else { PyGILState_STATE _gilstate = PyGILState_Ensure(); Py_INCREF((PyObject *) memview); PyGILState_Release(_gilstate); } } } static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *memslice, int have_gil, int lineno) { int last_time; struct __pyx_memoryview_obj *memview = memslice->memview; if (!memview ) { return; } else if ((PyObject *) memview == Py_None) { memslice->memview = NULL; return; } if (__pyx_get_slice_count(memview) <= 0) __pyx_fatalerror("Acquisition count is %d (line %d)", __pyx_get_slice_count(memview), lineno); last_time = __pyx_sub_acquisition_count(memview) == 1; memslice->data = NULL; if (last_time) { if (have_gil) { Py_CLEAR(memslice->memview); } else { PyGILState_STATE _gilstate = PyGILState_Ensure(); Py_CLEAR(memslice->memview); PyGILState_Release(_gilstate); } } else { memslice->memview = NULL; } } static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name) { PyObject *result; #if CYTHON_COMPILING_IN_CPYTHON result = PyDict_GetItem(__pyx_d, name); if (likely(result)) { Py_INCREF(result); } else { #else result = PyObject_GetItem(__pyx_d, name); if (!result) { PyErr_Clear(); #endif result = __Pyx_GetBuiltinName(name); } return result; } #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) { PyObject *result; ternaryfunc call = func->ob_type->tp_call; if (unlikely(!call)) return PyObject_Call(func, arg, kw); if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) return NULL; result = (*call)(func, arg, kw); Py_LeaveRecursiveCall(); if (unlikely(!result) && unlikely(!PyErr_Occurred())) { PyErr_SetString( PyExc_SystemError, "NULL result without error in PyObject_Call"); } return result; } #endif #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) { PyObject *self, *result; PyCFunction cfunc; cfunc = PyCFunction_GET_FUNCTION(func); self = PyCFunction_GET_SELF(func); if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) return NULL; result = cfunc(self, arg); Py_LeaveRecursiveCall(); if (unlikely(!result) && unlikely(!PyErr_Occurred())) { PyErr_SetString( PyExc_SystemError, "NULL result without error in PyObject_Call"); } return result; } #endif #if CYTHON_COMPILING_IN_CPYTHON static PyObject* 
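/* Call-overhead optimisation (a description of the fast path, with an
   illustrative call site): for a C function declared METH_O the 1-tuple of
   arguments is pure overhead, so __Pyx_PyObject_CallOneArg checks
   PyCFunction_GET_FLAGS and, when METH_O is set, jumps straight to
   __Pyx_PyObject_CallMethO above; only the general case pays for
   PyTuple_New. CPython's len() is METH_O, for example, so

       PyObject *r = __Pyx_PyObject_CallOneArg(builtin_len, some_list);

   (names hypothetical) would never allocate an argument tuple. */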
__Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) { PyObject *result; PyObject *args = PyTuple_New(1); if (unlikely(!args)) return NULL; Py_INCREF(arg); PyTuple_SET_ITEM(args, 0, arg); result = __Pyx_PyObject_Call(func, args, NULL); Py_DECREF(args); return result; } static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { #ifdef __Pyx_CyFunction_USED if (likely(PyCFunction_Check(func) || PyObject_TypeCheck(func, __pyx_CyFunctionType))) { #else if (likely(PyCFunction_Check(func))) { #endif if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) { return __Pyx_PyObject_CallMethO(func, arg); } } return __Pyx__PyObject_CallOneArg(func, arg); } #else static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { PyObject* args = PyTuple_Pack(1, arg); return (likely(args)) ? __Pyx_PyObject_Call(func, args, NULL) : NULL; } #endif static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb) { #if CYTHON_COMPILING_IN_CPYTHON PyObject *tmp_type, *tmp_value, *tmp_tb; PyThreadState *tstate = PyThreadState_GET(); tmp_type = tstate->curexc_type; tmp_value = tstate->curexc_value; tmp_tb = tstate->curexc_traceback; tstate->curexc_type = type; tstate->curexc_value = value; tstate->curexc_traceback = tb; Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); #else PyErr_Restore(type, value, tb); #endif } static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb) { #if CYTHON_COMPILING_IN_CPYTHON PyThreadState *tstate = PyThreadState_GET(); *type = tstate->curexc_type; *value = tstate->curexc_value; *tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; #else PyErr_Fetch(type, value, tb); #endif } #if PY_MAJOR_VERSION < 3 static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, CYTHON_UNUSED PyObject *cause) { Py_XINCREF(type); if (!value || value == Py_None) value = NULL; else Py_INCREF(value); if (!tb || tb == Py_None) tb = NULL; else { Py_INCREF(tb); if (!PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto raise_error; } } if (PyType_Check(type)) { #if CYTHON_COMPILING_IN_PYPY if (!value) { Py_INCREF(Py_None); value = Py_None; } #endif PyErr_NormalizeException(&type, &value, &tb); } else { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto raise_error; } value = type; type = (PyObject*) Py_TYPE(type); Py_INCREF(type); if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto raise_error; } } __Pyx_ErrRestore(type, value, tb); return; raise_error: Py_XDECREF(value); Py_XDECREF(type); Py_XDECREF(tb); return; } #else static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { PyObject* owned_instance = NULL; if (tb == Py_None) { tb = 0; } else if (tb && !PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto bad; } if (value == Py_None) value = 0; if (PyExceptionInstance_Check(type)) { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto bad; } value = type; type = (PyObject*) Py_TYPE(value); } else if (PyExceptionClass_Check(type)) { PyObject *instance_class = NULL; if (value && PyExceptionInstance_Check(value)) { instance_class = (PyObject*) 
Py_TYPE(value); if (instance_class != type) { int is_subclass = PyObject_IsSubclass(instance_class, type); if (!is_subclass) { instance_class = NULL; } else if (unlikely(is_subclass == -1)) { goto bad; } else { type = instance_class; } } } if (!instance_class) { PyObject *args; if (!value) args = PyTuple_New(0); else if (PyTuple_Check(value)) { Py_INCREF(value); args = value; } else args = PyTuple_Pack(1, value); if (!args) goto bad; owned_instance = PyObject_Call(type, args, NULL); Py_DECREF(args); if (!owned_instance) goto bad; value = owned_instance; if (!PyExceptionInstance_Check(value)) { PyErr_Format(PyExc_TypeError, "calling %R should have returned an instance of " "BaseException, not %R", type, Py_TYPE(value)); goto bad; } } } else { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto bad; } #if PY_VERSION_HEX >= 0x03030000 if (cause) { #else if (cause && cause != Py_None) { #endif PyObject *fixed_cause; if (cause == Py_None) { fixed_cause = NULL; } else if (PyExceptionClass_Check(cause)) { fixed_cause = PyObject_CallObject(cause, NULL); if (fixed_cause == NULL) goto bad; } else if (PyExceptionInstance_Check(cause)) { fixed_cause = cause; Py_INCREF(fixed_cause); } else { PyErr_SetString(PyExc_TypeError, "exception causes must derive from " "BaseException"); goto bad; } PyException_SetCause(value, fixed_cause); } PyErr_SetObject(type, value); if (tb) { #if CYTHON_COMPILING_IN_PYPY PyObject *tmp_type, *tmp_value, *tmp_tb; PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb); Py_INCREF(tb); PyErr_Restore(tmp_type, tmp_value, tb); Py_XDECREF(tmp_tb); #else PyThreadState *tstate = PyThreadState_GET(); PyObject* tmp_tb = tstate->curexc_traceback; if (tb != tmp_tb) { Py_INCREF(tb); tstate->curexc_traceback = tb; Py_XDECREF(tmp_tb); } #endif } bad: Py_XDECREF(owned_instance); return; } #endif static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals) { #if CYTHON_COMPILING_IN_PYPY return PyObject_RichCompareBool(s1, s2, equals); #else if (s1 == s2) { return (equals == Py_EQ); } else if (PyBytes_CheckExact(s1) & PyBytes_CheckExact(s2)) { const char *ps1, *ps2; Py_ssize_t length = PyBytes_GET_SIZE(s1); if (length != PyBytes_GET_SIZE(s2)) return (equals == Py_NE); ps1 = PyBytes_AS_STRING(s1); ps2 = PyBytes_AS_STRING(s2); if (ps1[0] != ps2[0]) { return (equals == Py_NE); } else if (length == 1) { return (equals == Py_EQ); } else { int result = memcmp(ps1, ps2, (size_t)length); return (equals == Py_EQ) ? 
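/* Bytes equality, cheapest test first: pointer identity, then length, then
   the first byte as a quick rejector, and only then memcmp over the whole
   buffer; the ternary below merely maps the memcmp result onto the requested
   Py_EQ / Py_NE sense. Collapsed to its core (simplified sketch, omitting
   the first-byte shortcut):

       int equal = (n1 == n2) && memcmp(p1, p2, (size_t)n1) == 0;
*/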
(result == 0) : (result != 0); } } else if ((s1 == Py_None) & PyBytes_CheckExact(s2)) { return (equals == Py_NE); } else if ((s2 == Py_None) & PyBytes_CheckExact(s1)) { return (equals == Py_NE); } else { int result; PyObject* py_result = PyObject_RichCompare(s1, s2, equals); if (!py_result) return -1; result = __Pyx_PyObject_IsTrue(py_result); Py_DECREF(py_result); return result; } #endif } static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals) { #if CYTHON_COMPILING_IN_PYPY return PyObject_RichCompareBool(s1, s2, equals); #else #if PY_MAJOR_VERSION < 3 PyObject* owned_ref = NULL; #endif int s1_is_unicode, s2_is_unicode; if (s1 == s2) { goto return_eq; } s1_is_unicode = PyUnicode_CheckExact(s1); s2_is_unicode = PyUnicode_CheckExact(s2); #if PY_MAJOR_VERSION < 3 if ((s1_is_unicode & (!s2_is_unicode)) && PyString_CheckExact(s2)) { owned_ref = PyUnicode_FromObject(s2); if (unlikely(!owned_ref)) return -1; s2 = owned_ref; s2_is_unicode = 1; } else if ((s2_is_unicode & (!s1_is_unicode)) && PyString_CheckExact(s1)) { owned_ref = PyUnicode_FromObject(s1); if (unlikely(!owned_ref)) return -1; s1 = owned_ref; s1_is_unicode = 1; } else if (((!s2_is_unicode) & (!s1_is_unicode))) { return __Pyx_PyBytes_Equals(s1, s2, equals); } #endif if (s1_is_unicode & s2_is_unicode) { Py_ssize_t length; int kind; void *data1, *data2; if (unlikely(__Pyx_PyUnicode_READY(s1) < 0) || unlikely(__Pyx_PyUnicode_READY(s2) < 0)) return -1; length = __Pyx_PyUnicode_GET_LENGTH(s1); if (length != __Pyx_PyUnicode_GET_LENGTH(s2)) { goto return_ne; } kind = __Pyx_PyUnicode_KIND(s1); if (kind != __Pyx_PyUnicode_KIND(s2)) { goto return_ne; } data1 = __Pyx_PyUnicode_DATA(s1); data2 = __Pyx_PyUnicode_DATA(s2); if (__Pyx_PyUnicode_READ(kind, data1, 0) != __Pyx_PyUnicode_READ(kind, data2, 0)) { goto return_ne; } else if (length == 1) { goto return_eq; } else { int result = memcmp(data1, data2, (size_t)(length * kind)); #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif return (equals == Py_EQ) ? 
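/* For unicode the same memcmp trick is sound only because PEP 393 strings
   are canonical: equal strings necessarily have equal lengths and equal
   kinds (1-, 2- or 4-byte code units), so once length and kind match,
   memcmp(data1, data2, length * kind) decides equality exactly; strings of
   differing kind were already rejected above without touching the character
   data. */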
(result == 0) : (result != 0); } } else if ((s1 == Py_None) & s2_is_unicode) { goto return_ne; } else if ((s2 == Py_None) & s1_is_unicode) { goto return_ne; } else { int result; PyObject* py_result = PyObject_RichCompare(s1, s2, equals); if (!py_result) return -1; result = __Pyx_PyObject_IsTrue(py_result); Py_DECREF(py_result); return result; } return_eq: #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif return (equals == Py_EQ); return_ne: #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif return (equals == Py_NE); #endif } static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *o, PyObject *n) { #if CYTHON_COMPILING_IN_CPYTHON #if PY_MAJOR_VERSION >= 3 if (likely(PyUnicode_Check(n))) #else if (likely(PyString_Check(n))) #endif return __Pyx_PyObject_GetAttrStr(o, n); #endif return PyObject_GetAttr(o, n); } static CYTHON_INLINE PyObject* __Pyx_decode_c_string( const char* cstring, Py_ssize_t start, Py_ssize_t stop, const char* encoding, const char* errors, PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)) { Py_ssize_t length; if (unlikely((start < 0) | (stop < 0))) { length = strlen(cstring); if (start < 0) { start += length; if (start < 0) start = 0; } if (stop < 0) stop += length; } length = stop - start; if (unlikely(length <= 0)) return PyUnicode_FromUnicode(NULL, 0); cstring += start; if (decode_func) { return decode_func(cstring, length, errors); } else { return PyUnicode_Decode(cstring, length, encoding, errors); } } static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) { PyErr_Format(PyExc_ValueError, "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected); } static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) { PyErr_Format(PyExc_ValueError, "need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack", index, (index == 1) ? 
"" : "s"); } static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); } static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) { if (unlikely(!type)) { PyErr_SetString(PyExc_SystemError, "Missing type object"); return 0; } if (likely(PyObject_TypeCheck(obj, type))) return 1; PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s", Py_TYPE(obj)->tp_name, type->tp_name); return 0; } static CYTHON_INLINE void __Pyx_ExceptionSave(PyObject **type, PyObject **value, PyObject **tb) { #if CYTHON_COMPILING_IN_CPYTHON PyThreadState *tstate = PyThreadState_GET(); *type = tstate->exc_type; *value = tstate->exc_value; *tb = tstate->exc_traceback; Py_XINCREF(*type); Py_XINCREF(*value); Py_XINCREF(*tb); #else PyErr_GetExcInfo(type, value, tb); #endif } static void __Pyx_ExceptionReset(PyObject *type, PyObject *value, PyObject *tb) { #if CYTHON_COMPILING_IN_CPYTHON PyObject *tmp_type, *tmp_value, *tmp_tb; PyThreadState *tstate = PyThreadState_GET(); tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = type; tstate->exc_value = value; tstate->exc_traceback = tb; Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); #else PyErr_SetExcInfo(type, value, tb); #endif } static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb) { PyObject *local_type, *local_value, *local_tb; #if CYTHON_COMPILING_IN_CPYTHON PyObject *tmp_type, *tmp_value, *tmp_tb; PyThreadState *tstate = PyThreadState_GET(); local_type = tstate->curexc_type; local_value = tstate->curexc_value; local_tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; #else PyErr_Fetch(&local_type, &local_value, &local_tb); #endif PyErr_NormalizeException(&local_type, &local_value, &local_tb); #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(tstate->curexc_type)) #else if (unlikely(PyErr_Occurred())) #endif goto bad; #if PY_MAJOR_VERSION >= 3 if (local_tb) { if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0)) goto bad; } #endif Py_XINCREF(local_tb); Py_XINCREF(local_type); Py_XINCREF(local_value); *type = local_type; *value = local_value; *tb = local_tb; #if CYTHON_COMPILING_IN_CPYTHON tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = local_type; tstate->exc_value = local_value; tstate->exc_traceback = local_tb; Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); #else PyErr_SetExcInfo(local_type, local_value, local_tb); #endif return 0; bad: *type = 0; *value = 0; *tb = 0; Py_XDECREF(local_type); Py_XDECREF(local_value); Py_XDECREF(local_tb); return -1; } static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; #if CYTHON_COMPILING_IN_CPYTHON PyThreadState *tstate = PyThreadState_GET(); tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = *type; tstate->exc_value = *value; tstate->exc_traceback = *tb; #else PyErr_GetExcInfo(&tmp_type, &tmp_value, &tmp_tb); PyErr_SetExcInfo(*type, *value, *tb); #endif *type = tmp_type; *value = tmp_value; *tb = tmp_tb; } static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) { PyObject *empty_list = 0; PyObject *module = 0; PyObject *global_dict = 0; PyObject *empty_dict = 0; PyObject *list; #if PY_VERSION_HEX < 0x03030000 PyObject 
*py_import; py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import); if (!py_import) goto bad; #endif if (from_list) list = from_list; else { empty_list = PyList_New(0); if (!empty_list) goto bad; list = empty_list; } global_dict = PyModule_GetDict(__pyx_m); if (!global_dict) goto bad; empty_dict = PyDict_New(); if (!empty_dict) goto bad; { #if PY_MAJOR_VERSION >= 3 if (level == -1) { if (strchr(__Pyx_MODULE_NAME, '.')) { #if PY_VERSION_HEX < 0x03030000 PyObject *py_level = PyInt_FromLong(1); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, NULL); Py_DECREF(py_level); #else module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, 1); #endif if (!module) { if (!PyErr_ExceptionMatches(PyExc_ImportError)) goto bad; PyErr_Clear(); } } level = 0; } #endif if (!module) { #if PY_VERSION_HEX < 0x03030000 PyObject *py_level = PyInt_FromLong(level); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, NULL); Py_DECREF(py_level); #else module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, level); #endif } } bad: #if PY_VERSION_HEX < 0x03030000 Py_XDECREF(py_import); #endif Py_XDECREF(empty_list); Py_XDECREF(empty_dict); return module; } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) { PyObject *r; if (!j) return NULL; r = PyObject_GetItem(o, j); Py_DECREF(j); return r; } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { #if CYTHON_COMPILING_IN_CPYTHON if (wraparound & unlikely(i < 0)) i += PyList_GET_SIZE(o); if ((!boundscheck) || likely((0 <= i) & (i < PyList_GET_SIZE(o)))) { PyObject *r = PyList_GET_ITEM(o, i); Py_INCREF(r); return r; } return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); #else return PySequence_GetItem(o, i); #endif } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { #if CYTHON_COMPILING_IN_CPYTHON if (wraparound & unlikely(i < 0)) i += PyTuple_GET_SIZE(o); if ((!boundscheck) || likely((0 <= i) & (i < PyTuple_GET_SIZE(o)))) { PyObject *r = PyTuple_GET_ITEM(o, i); Py_INCREF(r); return r; } return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); #else return PySequence_GetItem(o, i); #endif } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list, CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { #if CYTHON_COMPILING_IN_CPYTHON if (is_list || PyList_CheckExact(o)) { Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyList_GET_SIZE(o); if ((!boundscheck) || (likely((n >= 0) & (n < PyList_GET_SIZE(o))))) { PyObject *r = PyList_GET_ITEM(o, n); Py_INCREF(r); return r; } } else if (PyTuple_CheckExact(o)) { Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? 
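/* These GetItemInt helpers let a known list/tuple subscript compile down to
   a direct PyList_GET_ITEM / PyTuple_GET_ITEM: wraparound folds a negative
   index by adding the length, boundscheck can be compiled away (e.g. under a
   boundscheck(False) directive), and anything outside the fast window falls
   back to sq_item or the fully generic PyObject_GetItem. The shape of the
   list fast path (illustrative):

       if (i < 0) i += PyList_GET_SIZE(o);    // wraparound
       if (0 <= i && i < PyList_GET_SIZE(o))  // bounds check
           return PyList_GET_ITEM(o, i);      // borrowed ref; the real helper
                                              // INCREFs it before returning
*/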
i : i + PyTuple_GET_SIZE(o); if ((!boundscheck) || likely((n >= 0) & (n < PyTuple_GET_SIZE(o)))) { PyObject *r = PyTuple_GET_ITEM(o, n); Py_INCREF(r); return r; } } else { PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence; if (likely(m && m->sq_item)) { if (wraparound && unlikely(i < 0) && likely(m->sq_length)) { Py_ssize_t l = m->sq_length(o); if (likely(l >= 0)) { i += l; } else { if (PyErr_ExceptionMatches(PyExc_OverflowError)) PyErr_Clear(); else return NULL; } } return m->sq_item(o, i); } } #else if (is_list || PySequence_Check(o)) { return PySequence_GetItem(o, i); } #endif return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); } #if CYTHON_USE_PYLONG_INTERNALS #include "longintrepr.h" #endif #if CYTHON_COMPILING_IN_CPYTHON static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, CYTHON_UNUSED long intval, CYTHON_UNUSED int inplace) { #if PY_MAJOR_VERSION < 3 if (likely(PyInt_CheckExact(op1))) { const long b = intval; long x; long a = PyInt_AS_LONG(op1); x = (long)((unsigned long)a + b); if (likely((x^a) >= 0 || (x^b) >= 0)) return PyInt_FromLong(x); return PyLong_Type.tp_as_number->nb_add(op1, op2); } #endif #if CYTHON_USE_PYLONG_INTERNALS && PY_MAJOR_VERSION >= 3 if (likely(PyLong_CheckExact(op1))) { const long b = intval; long a, x; const PY_LONG_LONG llb = intval; PY_LONG_LONG lla, llx; const digit* digits = ((PyLongObject*)op1)->ob_digit; const Py_ssize_t size = Py_SIZE(op1); if (likely(__Pyx_sst_abs(size) <= 1)) { a = likely(size) ? digits[0] : 0; if (size == -1) a = -a; } else { switch (size) { case -2: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { a = -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | digits[0])); break; } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { lla = -(PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | digits[0])); goto long_long; } case 2: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { a = (long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | digits[0])); break; } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { lla = (PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | digits[0])); goto long_long; } case -3: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { a = -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | digits[1]) << PyLong_SHIFT) | digits[0])); break; } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { lla = -(PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | digits[1]) << PyLong_SHIFT) | digits[0])); goto long_long; } case 3: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { a = (long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | digits[1]) << PyLong_SHIFT) | digits[0])); break; } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { lla = (PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | digits[1]) << PyLong_SHIFT) | digits[0])); goto long_long; } case -4: if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { a = -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | digits[2]) << PyLong_SHIFT) | digits[1]) << PyLong_SHIFT) | digits[0])); break; } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { lla = -(PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | digits[2]) << PyLong_SHIFT) | digits[1]) << PyLong_SHIFT) | digits[0])); goto long_long; } case 4: if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { a = (long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | digits[2]) << PyLong_SHIFT) | digits[1]) << PyLong_SHIFT) | digits[0])); break; } else if (8 * 
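/* Background for the digit gymnastics in these cases: CPython stores a long
   as base-2^PyLong_SHIFT digits (15 or 30 bits each) with the sign carried
   by ob_size, so any value with |ob_size| <= 4 can be reassembled into a C
   long / long long by shift-and-or over at most four digits. Once an operand
   fits, the fast path adds in unsigned arithmetic and detects signed
   overflow with the sign test used at the top of this function:

       long x = (long)((unsigned long)a + (unsigned long)b);
       int ok = ((x ^ a) >= 0) || ((x ^ b) >= 0);  // x shares a sign with a or b

   which is exact on two's-complement targets; on overflow (or too many
   digits) it falls back to PyLong_Type.tp_as_number->nb_add. */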
sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { lla = (PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | digits[2]) << PyLong_SHIFT) | digits[1]) << PyLong_SHIFT) | digits[0])); goto long_long; } default: return PyLong_Type.tp_as_number->nb_add(op1, op2); } } x = a + b; return PyLong_FromLong(x); long_long: llx = lla + llb; return PyLong_FromLongLong(llx); } #endif if (PyFloat_CheckExact(op1)) { const long b = intval; double a = PyFloat_AS_DOUBLE(op1); double result; PyFPE_START_PROTECT("add", return NULL) result = ((double)a) + (double)b; PyFPE_END_PROTECT(result) return PyFloat_FromDouble(result); } return (inplace ? PyNumber_InPlaceAdd : PyNumber_Add)(op1, op2); } #endif static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname) { PyErr_Format(PyExc_UnboundLocalError, "local variable '%s' referenced before assignment", varname); } static void __Pyx_WriteUnraisable(const char *name, CYTHON_UNUSED int clineno, CYTHON_UNUSED int lineno, CYTHON_UNUSED const char *filename, int full_traceback, CYTHON_UNUSED int nogil) { PyObject *old_exc, *old_val, *old_tb; PyObject *ctx; #ifdef WITH_THREAD PyGILState_STATE state; if (nogil) state = PyGILState_Ensure(); #endif __Pyx_ErrFetch(&old_exc, &old_val, &old_tb); if (full_traceback) { Py_XINCREF(old_exc); Py_XINCREF(old_val); Py_XINCREF(old_tb); __Pyx_ErrRestore(old_exc, old_val, old_tb); PyErr_PrintEx(1); } #if PY_MAJOR_VERSION < 3 ctx = PyString_FromString(name); #else ctx = PyUnicode_FromString(name); #endif __Pyx_ErrRestore(old_exc, old_val, old_tb); if (!ctx) { PyErr_WriteUnraisable(Py_None); } else { PyErr_WriteUnraisable(ctx); Py_DECREF(ctx); } #ifdef WITH_THREAD if (nogil) PyGILState_Release(state); #endif } static int __Pyx_SetVtable(PyObject *dict, void *vtable) { #if PY_VERSION_HEX >= 0x02070000 PyObject *ob = PyCapsule_New(vtable, 0, 0); #else PyObject *ob = PyCObject_FromVoidPtr(vtable, 0); #endif if (!ob) goto bad; if (PyDict_SetItem(dict, __pyx_n_s_pyx_vtable, ob) < 0) goto bad; Py_DECREF(ob); return 0; bad: Py_XDECREF(ob); return -1; } static void* __Pyx_GetVtable(PyObject *dict) { void* ptr; PyObject *ob = PyObject_GetItem(dict, __pyx_n_s_pyx_vtable); if (!ob) goto bad; #if PY_VERSION_HEX >= 0x02070000 ptr = PyCapsule_GetPointer(ob, 0); #else ptr = PyCObject_AsVoidPtr(ob); #endif if (!ptr && !PyErr_Occurred()) PyErr_SetString(PyExc_RuntimeError, "invalid vtable found for imported type"); Py_DECREF(ob); return ptr; bad: Py_XDECREF(ob); return NULL; } static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) { int start = 0, mid = 0, end = count - 1; if (end >= 0 && code_line > entries[end].code_line) { return count; } while (start < end) { mid = start + (end - start) / 2; if (code_line < entries[mid].code_line) { end = mid; } else if (code_line > entries[mid].code_line) { start = mid + 1; } else { return mid; } } if (code_line <= entries[mid].code_line) { return mid; } else { return mid + 1; } } static PyCodeObject *__pyx_find_code_object(int code_line) { PyCodeObject* code_object; int pos; if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) { return NULL; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) { return NULL; } code_object = __pyx_code_cache.entries[pos].code_object; Py_INCREF(code_object); return code_object; } static void __pyx_insert_code_object(int code_line, 
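/* The traceback machinery caches one synthetic PyCodeObject per source line:
   entries stay sorted by code_line, __pyx_bisect_code_objects above performs
   the binary search, and the insert below places (or replaces) an entry at
   the bisection point, growing the array in blocks of 64 through
   PyMem_Realloc. Each cached code object holds its own reference, and
   lookups cost O(log n) per traceback entry. */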
PyCodeObject* code_object) { int pos, i; __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries; if (unlikely(!code_line)) { return; } if (unlikely(!entries)) { entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry)); if (likely(entries)) { __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = 64; __pyx_code_cache.count = 1; entries[0].code_line = code_line; entries[0].code_object = code_object; Py_INCREF(code_object); } return; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) { PyCodeObject* tmp = entries[pos].code_object; entries[pos].code_object = code_object; Py_DECREF(tmp); return; } if (__pyx_code_cache.count == __pyx_code_cache.max_count) { int new_max = __pyx_code_cache.max_count + 64; entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc( __pyx_code_cache.entries, (size_t)new_max*sizeof(__Pyx_CodeObjectCacheEntry)); if (unlikely(!entries)) { return; } __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = new_max; } for (i=__pyx_code_cache.count; i>pos; i--) { entries[i] = entries[i-1]; } entries[pos].code_line = code_line; entries[pos].code_object = code_object; __pyx_code_cache.count++; Py_INCREF(code_object); } #include "compile.h" #include "frameobject.h" #include "traceback.h" static PyCodeObject* __Pyx_CreateCodeObjectForTraceback( const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyObject *py_srcfile = 0; PyObject *py_funcname = 0; #if PY_MAJOR_VERSION < 3 py_srcfile = PyString_FromString(filename); #else py_srcfile = PyUnicode_FromString(filename); #endif if (!py_srcfile) goto bad; if (c_line) { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #else py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #endif } else { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromString(funcname); #else py_funcname = PyUnicode_FromString(funcname); #endif } if (!py_funcname) goto bad; py_code = __Pyx_PyCode_New( 0, 0, 0, 0, 0, __pyx_empty_bytes, /*PyObject *code,*/ __pyx_empty_tuple, /*PyObject *consts,*/ __pyx_empty_tuple, /*PyObject *names,*/ __pyx_empty_tuple, /*PyObject *varnames,*/ __pyx_empty_tuple, /*PyObject *freevars,*/ __pyx_empty_tuple, /*PyObject *cellvars,*/ py_srcfile, /*PyObject *filename,*/ py_funcname, /*PyObject *name,*/ py_line, __pyx_empty_bytes /*PyObject *lnotab*/ ); Py_DECREF(py_srcfile); Py_DECREF(py_funcname); return py_code; bad: Py_XDECREF(py_srcfile); Py_XDECREF(py_funcname); return NULL; } static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyFrameObject *py_frame = 0; py_code = __pyx_find_code_object(c_line ? c_line : py_line); if (!py_code) { py_code = __Pyx_CreateCodeObjectForTraceback( funcname, c_line, py_line, filename); if (!py_code) goto bad; __pyx_insert_code_object(c_line ? 
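/* How a C-level failure becomes a readable Python traceback: a code object
   is fabricated carrying the .pyx filename, the function name (suffixed with
   the C file and line when available) and the Python line number; it is
   wrapped in a fresh PyFrameObject whose f_lineno is forced to that line,
   and handed to PyTraceBack_Here. The frame never executes; it exists solely
   so the standard traceback printer shows a sensible entry for compiled
   code. */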
c_line : py_line, py_code); } py_frame = PyFrame_New( PyThreadState_GET(), /*PyThreadState *tstate,*/ py_code, /*PyCodeObject *code,*/ __pyx_d, /*PyObject *globals,*/ 0 /*PyObject *locals*/ ); if (!py_frame) goto bad; py_frame->f_lineno = py_line; PyTraceBack_Here(py_frame); bad: Py_XDECREF(py_code); Py_XDECREF(py_frame); } #if PY_MAJOR_VERSION < 3 static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) { if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags); if (PyObject_TypeCheck(obj, __pyx_array_type)) return __pyx_array_getbuffer(obj, view, flags); if (PyObject_TypeCheck(obj, __pyx_memoryview_type)) return __pyx_memoryview_getbuffer(obj, view, flags); PyErr_Format(PyExc_TypeError, "'%.200s' does not have the buffer interface", Py_TYPE(obj)->tp_name); return -1; } static void __Pyx_ReleaseBuffer(Py_buffer *view) { PyObject *obj = view->obj; if (!obj) return; if (PyObject_CheckBuffer(obj)) { PyBuffer_Release(view); return; } Py_DECREF(obj); view->obj = NULL; } #endif static int __pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b) { int i; if (!a || !b) return 0; if (a == b) return 1; if (a->size != b->size || a->typegroup != b->typegroup || a->is_unsigned != b->is_unsigned || a->ndim != b->ndim) { if (a->typegroup == 'H' || b->typegroup == 'H') { return a->size == b->size; } else { return 0; } } if (a->ndim) { for (i = 0; i < a->ndim; i++) if (a->arraysize[i] != b->arraysize[i]) return 0; } if (a->typegroup == 'S') { if (a->flags != b->flags) return 0; if (a->fields || b->fields) { if (!(a->fields && b->fields)) return 0; for (i = 0; a->fields[i].type && b->fields[i].type; i++) { __Pyx_StructField *field_a = a->fields + i; __Pyx_StructField *field_b = b->fields + i; if (field_a->offset != field_b->offset || !__pyx_typeinfo_cmp(field_a->type, field_b->type)) return 0; } return !a->fields[i].type && !b->fields[i].type; } } return 1; } static int __pyx_check_strides(Py_buffer *buf, int dim, int ndim, int spec) { if (buf->shape[dim] <= 1) return 1; if (buf->strides) { if (spec & __Pyx_MEMVIEW_CONTIG) { if (spec & (__Pyx_MEMVIEW_PTR|__Pyx_MEMVIEW_FULL)) { if (buf->strides[dim] != sizeof(void *)) { PyErr_Format(PyExc_ValueError, "Buffer is not indirectly contiguous " "in dimension %d.", dim); goto fail; } } else if (buf->strides[dim] != buf->itemsize) { PyErr_SetString(PyExc_ValueError, "Buffer and memoryview are not contiguous " "in the same dimension."); goto fail; } } if (spec & __Pyx_MEMVIEW_FOLLOW) { Py_ssize_t stride = buf->strides[dim]; if (stride < 0) stride = -stride; if (stride < buf->itemsize) { PyErr_SetString(PyExc_ValueError, "Buffer and memoryview are not contiguous " "in the same dimension."); goto fail; } } } else { if (spec & __Pyx_MEMVIEW_CONTIG && dim != ndim - 1) { PyErr_Format(PyExc_ValueError, "C-contiguous buffer is not contiguous in " "dimension %d", dim); goto fail; } else if (spec & (__Pyx_MEMVIEW_PTR)) { PyErr_Format(PyExc_ValueError, "C-contiguous buffer is not indirect in " "dimension %d", dim); goto fail; } else if (buf->suboffsets) { PyErr_SetString(PyExc_ValueError, "Buffer exposes suboffsets but no strides"); goto fail; } } return 1; fail: return 0; } static int __pyx_check_suboffsets(Py_buffer *buf, int dim, CYTHON_UNUSED int ndim, int spec) { if (spec & __Pyx_MEMVIEW_DIRECT) { if (buf->suboffsets && buf->suboffsets[dim] >= 0) { PyErr_Format(PyExc_ValueError, "Buffer not compatible with direct access " "in dimension %d.", dim); goto fail; } } if (spec & __Pyx_MEMVIEW_PTR) { if (!buf->suboffsets || (buf->suboffsets && 
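/* Axis-spec background for this check: a "direct" dimension is reached by
   pointer arithmetic alone and must have a negative suboffset, while an
   "indirect" one (__Pyx_MEMVIEW_PTR) means the stride lands on a pointer
   that must be dereferenced, signalled per PEP 3118 by a suboffset >= 0 (the
   PIL-style layout). A dimension declared indirect therefore requires a
   non-negative suboffset, which is precisely the mirror image of the DIRECT
   branch above. */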
buf->suboffsets[dim] < 0)) { PyErr_Format(PyExc_ValueError, "Buffer is not indirectly accessible " "in dimension %d.", dim); goto fail; } } return 1; fail: return 0; } static int __pyx_verify_contig(Py_buffer *buf, int ndim, int c_or_f_flag) { int i; if (c_or_f_flag & __Pyx_IS_F_CONTIG) { Py_ssize_t stride = 1; for (i = 0; i < ndim; i++) { if (stride * buf->itemsize != buf->strides[i] && buf->shape[i] > 1) { PyErr_SetString(PyExc_ValueError, "Buffer not fortran contiguous."); goto fail; } stride = stride * buf->shape[i]; } } else if (c_or_f_flag & __Pyx_IS_C_CONTIG) { Py_ssize_t stride = 1; for (i = ndim - 1; i >- 1; i--) { if (stride * buf->itemsize != buf->strides[i] && buf->shape[i] > 1) { PyErr_SetString(PyExc_ValueError, "Buffer not C contiguous."); goto fail; } stride = stride * buf->shape[i]; } } return 1; fail: return 0; } static int __Pyx_ValidateAndInit_memviewslice( int *axes_specs, int c_or_f_flag, int buf_flags, int ndim, __Pyx_TypeInfo *dtype, __Pyx_BufFmt_StackElem stack[], __Pyx_memviewslice *memviewslice, PyObject *original_obj) { struct __pyx_memoryview_obj *memview, *new_memview; __Pyx_RefNannyDeclarations Py_buffer *buf; int i, spec = 0, retval = -1; __Pyx_BufFmt_Context ctx; int from_memoryview = __pyx_memoryview_check(original_obj); __Pyx_RefNannySetupContext("ValidateAndInit_memviewslice", 0); if (from_memoryview && __pyx_typeinfo_cmp(dtype, ((struct __pyx_memoryview_obj *) original_obj)->typeinfo)) { memview = (struct __pyx_memoryview_obj *) original_obj; new_memview = NULL; } else { memview = (struct __pyx_memoryview_obj *) __pyx_memoryview_new( original_obj, buf_flags, 0, dtype); new_memview = memview; if (unlikely(!memview)) goto fail; } buf = &memview->view; if (buf->ndim != ndim) { PyErr_Format(PyExc_ValueError, "Buffer has wrong number of dimensions (expected %d, got %d)", ndim, buf->ndim); goto fail; } if (new_memview) { __Pyx_BufFmt_Init(&ctx, stack, dtype); if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail; } if ((unsigned) buf->itemsize != dtype->size) { PyErr_Format(PyExc_ValueError, "Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "u byte%s) " "does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "u byte%s)", buf->itemsize, (buf->itemsize > 1) ? "s" : "", dtype->name, dtype->size, (dtype->size > 1) ? 
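/* Validation order when binding a buffer to a typed memoryview: dimension
   count, then the exporter's format string against the expected dtype (only
   when a fresh intermediate memoryview had to be created), then itemsize
   (here), then per-axis stride and suboffset specs, and finally whole-buffer
   C/Fortran contiguity when the requested layout demands it; only after all
   checks pass is the raw __Pyx_memviewslice populated by
   __Pyx_init_memviewslice. */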
"s" : ""); goto fail; } for (i = 0; i < ndim; i++) { spec = axes_specs[i]; if (!__pyx_check_strides(buf, i, ndim, spec)) goto fail; if (!__pyx_check_suboffsets(buf, i, ndim, spec)) goto fail; } if (buf->strides && !__pyx_verify_contig(buf, ndim, c_or_f_flag)) goto fail; if (unlikely(__Pyx_init_memviewslice(memview, ndim, memviewslice, new_memview != NULL) == -1)) { goto fail; } retval = 0; goto no_fail; fail: Py_XDECREF(new_memview); retval = -1; no_fail: __Pyx_RefNannyFinishContext(); return retval; } static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(PyObject *obj) { __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_BufFmt_StackElem stack[1]; int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) }; int retcode; if (obj == Py_None) { result.memview = (struct __pyx_memoryview_obj *) Py_None; return result; } retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG, (PyBUF_C_CONTIGUOUS | PyBUF_FORMAT | PyBUF_WRITABLE), 2, &__Pyx_TypeInfo_double, stack, &result, obj); if (unlikely(retcode == -1)) goto __pyx_fail; return result; __pyx_fail: result.memview = NULL; result.data = NULL; return result; } static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_dc_int(PyObject *obj) { __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_BufFmt_StackElem stack[1]; int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) }; int retcode; if (obj == Py_None) { result.memview = (struct __pyx_memoryview_obj *) Py_None; return result; } retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG, (PyBUF_C_CONTIGUOUS | PyBUF_FORMAT | PyBUF_WRITABLE), 2, &__Pyx_TypeInfo_int, stack, &result, obj); if (unlikely(retcode == -1)) goto __pyx_fail; return result; __pyx_fail: result.memview = NULL; result.data = NULL; return result; } static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dc_int(PyObject *obj) { __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_BufFmt_StackElem stack[1]; int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) }; int retcode; if (obj == Py_None) { result.memview = (struct __pyx_memoryview_obj *) Py_None; return result; } retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG, (PyBUF_C_CONTIGUOUS | PyBUF_FORMAT | PyBUF_WRITABLE), 1, &__Pyx_TypeInfo_int, stack, &result, obj); if (unlikely(retcode == -1)) goto __pyx_fail; return result; __pyx_fail: result.memview = NULL; result.data = NULL; return result; } static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dc_double(PyObject *obj) { __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_BufFmt_StackElem stack[1]; int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) }; int retcode; if (obj == Py_None) { result.memview = (struct __pyx_memoryview_obj *) Py_None; return result; } retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG, (PyBUF_C_CONTIGUOUS | PyBUF_FORMAT | PyBUF_WRITABLE), 1, &__Pyx_TypeInfo_double, stack, &result, obj); if (unlikely(retcode == -1)) goto __pyx_fail; return result; __pyx_fail: result.memview = NULL; result.data = NULL; return result; } #define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0) #define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\ 
__PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1) #define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\ {\ func_type value = func_value;\ if (sizeof(target_type) < sizeof(func_type)) {\ if (unlikely(value != (func_type) (target_type) value)) {\ func_type zero = 0;\ if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\ return (target_type) -1;\ if (is_unsigned && unlikely(value < zero))\ goto raise_neg_overflow;\ else\ goto raise_overflow;\ }\ }\ return (target_type) value;\ } static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) { const int neg_one = (int) -1, const_zero = (int) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(int) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (int) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (int) 0; case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0]) case 2: if (8 * sizeof(int) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | digits[0]))) } else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) { return (int) (((((int)digits[1]) << PyLong_SHIFT) | digits[0])); } } break; case 3: if (8 * sizeof(int) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | digits[1]) << PyLong_SHIFT) | digits[0]))) } else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) { return (int) (((((((int)digits[2]) << PyLong_SHIFT) | digits[1]) << PyLong_SHIFT) | digits[0])); } } break; case 4: if (8 * sizeof(int) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | digits[2]) << PyLong_SHIFT) | digits[1]) << PyLong_SHIFT) | digits[0]))) } else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) { return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | digits[2]) << PyLong_SHIFT) | digits[1]) << PyLong_SHIFT) | digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (int) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(int) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x)) } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (int) 0; case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, -(sdigit) digits[0]) case 1: __PYX_VERIFY_RETURN_INT(int, digit, +digits[0]) case -2: if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | digits[0]))) } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { return (int) -(((((int)digits[1]) << PyLong_SHIFT) | digits[0])); } } break; case 2: if (8 * 
sizeof(int) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | digits[0]))) } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { return (int) (((((int)digits[1]) << PyLong_SHIFT) | digits[0])); } } break; case -3: if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | digits[1]) << PyLong_SHIFT) | digits[0]))) } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { return (int) -(((((((int)digits[2]) << PyLong_SHIFT) | digits[1]) << PyLong_SHIFT) | digits[0])); } } break; case 3: if (8 * sizeof(int) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | digits[1]) << PyLong_SHIFT) | digits[0]))) } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { return (int) (((((((int)digits[2]) << PyLong_SHIFT) | digits[1]) << PyLong_SHIFT) | digits[0])); } } break; case -4: if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | digits[2]) << PyLong_SHIFT) | digits[1]) << PyLong_SHIFT) | digits[0]))) } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { return (int) -(((((((((int)digits[3]) << PyLong_SHIFT) | digits[2]) << PyLong_SHIFT) | digits[1]) << PyLong_SHIFT) | digits[0])); } } break; case 4: if (8 * sizeof(int) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | digits[2]) << PyLong_SHIFT) | digits[1]) << PyLong_SHIFT) | digits[0]))) } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | digits[2]) << PyLong_SHIFT) | digits[1]) << PyLong_SHIFT) | digits[0])); } } break; } #endif if (sizeof(int) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x)) } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x)) } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else int val; PyObject *v = __Pyx_PyNumber_Int(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (int) -1; } } else { int val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (int) -1; val = __Pyx_PyInt_As_int(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to int"); return (int) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to int"); return (int) -1; } static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) { const int neg_one = (int) -1, const_zero = (int) 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(int) < sizeof(long)) { return PyInt_FromLong((long) 
value); } else if (sizeof(int) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); } } else { if (sizeof(int) <= sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(int), little, !is_unsigned); } } static PyObject *__pyx_memview_get_double(const char *itemp) { return (PyObject *) PyFloat_FromDouble(*(double *) itemp); } static int __pyx_memview_set_double(const char *itemp, PyObject *obj) { double value = __pyx_PyFloat_AsDouble(obj); if ((value == (double)-1) && PyErr_Occurred()) return 0; *(double *) itemp = value; return 1; } static int __pyx_memviewslice_is_contig(const __Pyx_memviewslice *mvs, char order, int ndim) { int i, index, step, start; Py_ssize_t itemsize = mvs->memview->view.itemsize; if (order == 'F') { step = 1; start = 0; } else { step = -1; start = ndim - 1; } for (i = 0; i < ndim; i++) { index = start + step * i; if (mvs->suboffsets[index] >= 0 || mvs->strides[index] != itemsize) return 0; itemsize *= mvs->shape[index]; } return 1; } static void __pyx_get_array_memory_extents(__Pyx_memviewslice *slice, void **out_start, void **out_end, int ndim, size_t itemsize) { char *start, *end; int i; start = end = slice->data; for (i = 0; i < ndim; i++) { Py_ssize_t stride = slice->strides[i]; Py_ssize_t extent = slice->shape[i]; if (extent == 0) { *out_start = *out_end = start; return; } else { if (stride > 0) end += stride * (extent - 1); else start += stride * (extent - 1); } } *out_start = start; *out_end = end + itemsize; } static int __pyx_slices_overlap(__Pyx_memviewslice *slice1, __Pyx_memviewslice *slice2, int ndim, size_t itemsize) { void *start1, *end1, *start2, *end2; __pyx_get_array_memory_extents(slice1, &start1, &end1, ndim, itemsize); __pyx_get_array_memory_extents(slice2, &start2, &end2, ndim, itemsize); return (start1 < end2) && (start2 < end1); } static __Pyx_memviewslice __pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs, const char *mode, int ndim, size_t sizeof_dtype, int contig_flag, int dtype_is_object) { __Pyx_RefNannyDeclarations int i; __Pyx_memviewslice new_mvs = { 0, 0, { 0 }, { 0 }, { 0 } }; struct __pyx_memoryview_obj *from_memview = from_mvs->memview; Py_buffer *buf = &from_memview->view; PyObject *shape_tuple = NULL; PyObject *temp_int = NULL; struct __pyx_array_obj *array_obj = NULL; struct __pyx_memoryview_obj *memview_obj = NULL; __Pyx_RefNannySetupContext("__pyx_memoryview_copy_new_contig", 0); for (i = 0; i < ndim; i++) { if (from_mvs->suboffsets[i] >= 0) { PyErr_Format(PyExc_ValueError, "Cannot copy memoryview slice with " "indirect dimensions (axis %d)", i); goto fail; } } shape_tuple = PyTuple_New(ndim); if (unlikely(!shape_tuple)) { goto fail; } __Pyx_GOTREF(shape_tuple); for(i = 0; i < ndim; i++) { temp_int = PyInt_FromSsize_t(from_mvs->shape[i]); if(unlikely(!temp_int)) { goto fail; } else { PyTuple_SET_ITEM(shape_tuple, i, temp_int); temp_int = NULL; } } array_obj = __pyx_array_new(shape_tuple, sizeof_dtype, buf->format, (char *) mode, NULL); if (unlikely(!array_obj)) { goto fail; } __Pyx_GOTREF(array_obj); memview_obj = (struct __pyx_memoryview_obj *) __pyx_memoryview_new( (PyObject *) array_obj, contig_flag, 
dtype_is_object, from_mvs->memview->typeinfo); if (unlikely(!memview_obj)) goto fail; if (unlikely(__Pyx_init_memviewslice(memview_obj, ndim, &new_mvs, 1) < 0)) goto fail; if (unlikely(__pyx_memoryview_copy_contents(*from_mvs, new_mvs, ndim, ndim, dtype_is_object) < 0)) goto fail; goto no_fail; fail: __Pyx_XDECREF(new_mvs.memview); new_mvs.memview = NULL; new_mvs.data = NULL; no_fail: __Pyx_XDECREF(shape_tuple); __Pyx_XDECREF(temp_int); __Pyx_XDECREF(array_obj); __Pyx_RefNannyFinishContext(); return new_mvs; } static CYTHON_INLINE PyObject * __pyx_capsule_create(void *p, CYTHON_UNUSED const char *sig) { PyObject *cobj; #if PY_VERSION_HEX >= 0x02070000 cobj = PyCapsule_New(p, sig, NULL); #else cobj = PyCObject_FromVoidPtr(p, NULL); #endif return cobj; } static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) { const long neg_one = (long) -1, const_zero = (long) 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(long) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(long) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); } } else { if (sizeof(long) <= sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(long), little, !is_unsigned); } } static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *x) { const char neg_one = (char) -1, const_zero = (char) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(char) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(char, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (char) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (char) 0; case 1: __PYX_VERIFY_RETURN_INT(char, digit, digits[0]) case 2: if (8 * sizeof(char) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | digits[0]))) } else if (8 * sizeof(char) >= 2 * PyLong_SHIFT) { return (char) (((((char)digits[1]) << PyLong_SHIFT) | digits[0])); } } break; case 3: if (8 * sizeof(char) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | digits[1]) << PyLong_SHIFT) | digits[0]))) } else if (8 * sizeof(char) >= 3 * PyLong_SHIFT) { return (char) (((((((char)digits[2]) << PyLong_SHIFT) | digits[1]) << PyLong_SHIFT) | digits[0])); } } break; case 4: if (8 * sizeof(char) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | digits[2]) << PyLong_SHIFT) | digits[1]) << PyLong_SHIFT) | digits[0]))) } else if (8 * sizeof(char) >= 4 * PyLong_SHIFT) { return (char) (((((((((char)digits[3]) << PyLong_SHIFT) | digits[2]) << PyLong_SHIFT) | digits[1]) << PyLong_SHIFT) | digits[0])); } } break; } #endif #if 
CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (char) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(char) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(char, unsigned long, PyLong_AsUnsignedLong(x)) } else if (sizeof(char) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(char, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (char) 0; case -1: __PYX_VERIFY_RETURN_INT(char, sdigit, -(sdigit) digits[0]) case 1: __PYX_VERIFY_RETURN_INT(char, digit, +digits[0]) case -2: if (8 * sizeof(char) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | digits[0]))) } else if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { return (char) -(((((char)digits[1]) << PyLong_SHIFT) | digits[0])); } } break; case 2: if (8 * sizeof(char) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | digits[0]))) } else if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { return (char) (((((char)digits[1]) << PyLong_SHIFT) | digits[0])); } } break; case -3: if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | digits[1]) << PyLong_SHIFT) | digits[0]))) } else if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { return (char) -(((((((char)digits[2]) << PyLong_SHIFT) | digits[1]) << PyLong_SHIFT) | digits[0])); } } break; case 3: if (8 * sizeof(char) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | digits[1]) << PyLong_SHIFT) | digits[0]))) } else if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { return (char) (((((((char)digits[2]) << PyLong_SHIFT) | digits[1]) << PyLong_SHIFT) | digits[0])); } } break; case -4: if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | digits[2]) << PyLong_SHIFT) | digits[1]) << PyLong_SHIFT) | digits[0]))) } else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) { return (char) -(((((((((char)digits[3]) << PyLong_SHIFT) | digits[2]) << PyLong_SHIFT) | digits[1]) << PyLong_SHIFT) | digits[0])); } } break; case 4: if (8 * sizeof(char) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | digits[2]) << PyLong_SHIFT) | digits[1]) << PyLong_SHIFT) | digits[0]))) } else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) { return (char) (((((((((char)digits[3]) << PyLong_SHIFT) | digits[2]) << PyLong_SHIFT) | digits[1]) << PyLong_SHIFT) | digits[0])); } } break; } #endif if (sizeof(char) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(char, long, PyLong_AsLong(x)) } else if (sizeof(char) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(char, PY_LONG_LONG, PyLong_AsLongLong(x)) } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) 
PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else char val; PyObject *v = __Pyx_PyNumber_Int(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (char) -1; } } else { char val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (char) -1; val = __Pyx_PyInt_As_char(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to char"); return (char) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to char"); return (char) -1; } static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) { const long neg_one = (long) -1, const_zero = (long) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(long) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (long) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (long) 0; case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0]) case 2: if (8 * sizeof(long) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | digits[0]))) } else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) { return (long) (((((long)digits[1]) << PyLong_SHIFT) | digits[0])); } } break; case 3: if (8 * sizeof(long) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | digits[1]) << PyLong_SHIFT) | digits[0]))) } else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) { return (long) (((((((long)digits[2]) << PyLong_SHIFT) | digits[1]) << PyLong_SHIFT) | digits[0])); } } break; case 4: if (8 * sizeof(long) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | digits[2]) << PyLong_SHIFT) | digits[1]) << PyLong_SHIFT) | digits[0]))) } else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) { return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | digits[2]) << PyLong_SHIFT) | digits[1]) << PyLong_SHIFT) | digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (long) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(long) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x)) } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (long) 0; case -1: 
__PYX_VERIFY_RETURN_INT(long, sdigit, -(sdigit) digits[0]) case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0]) case -2: if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | digits[0]))) } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { return (long) -(((((long)digits[1]) << PyLong_SHIFT) | digits[0])); } } break; case 2: if (8 * sizeof(long) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | digits[0]))) } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { return (long) (((((long)digits[1]) << PyLong_SHIFT) | digits[0])); } } break; case -3: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | digits[1]) << PyLong_SHIFT) | digits[0]))) } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { return (long) -(((((((long)digits[2]) << PyLong_SHIFT) | digits[1]) << PyLong_SHIFT) | digits[0])); } } break; case 3: if (8 * sizeof(long) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | digits[1]) << PyLong_SHIFT) | digits[0]))) } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { return (long) (((((((long)digits[2]) << PyLong_SHIFT) | digits[1]) << PyLong_SHIFT) | digits[0])); } } break; case -4: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | digits[2]) << PyLong_SHIFT) | digits[1]) << PyLong_SHIFT) | digits[0]))) } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { return (long) -(((((((((long)digits[3]) << PyLong_SHIFT) | digits[2]) << PyLong_SHIFT) | digits[1]) << PyLong_SHIFT) | digits[0])); } } break; case 4: if (8 * sizeof(long) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | digits[2]) << PyLong_SHIFT) | digits[1]) << PyLong_SHIFT) | digits[0]))) } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | digits[2]) << PyLong_SHIFT) | digits[1]) << PyLong_SHIFT) | digits[0])); } } break; } #endif if (sizeof(long) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x)) } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x)) } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else long val; PyObject *v = __Pyx_PyNumber_Int(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (long) -1; } } else { long val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (long) -1; val = 
__Pyx_PyInt_As_long(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to long"); return (long) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to long"); return (long) -1; } static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_ds_double(PyObject *obj) { __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_BufFmt_StackElem stack[1]; int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED) }; int retcode; if (obj == Py_None) { result.memview = (struct __pyx_memoryview_obj *) Py_None; return result; } retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, 0, PyBUF_RECORDS, 1, &__Pyx_TypeInfo_double, stack, &result, obj); if (unlikely(retcode == -1)) goto __pyx_fail; return result; __pyx_fail: result.memview = NULL; result.data = NULL; return result; } static int __Pyx_check_binary_version(void) { char ctversion[4], rtversion[4]; PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION); PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion()); if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) { char message[200]; PyOS_snprintf(message, sizeof(message), "compiletime version %s of module '%.100s' " "does not match runtime version %s", ctversion, __Pyx_MODULE_NAME, rtversion); return PyErr_WarnEx(NULL, message, 1); } return 0; } #ifndef __PYX_HAVE_RT_ImportModule #define __PYX_HAVE_RT_ImportModule static PyObject *__Pyx_ImportModule(const char *name) { PyObject *py_name = 0; PyObject *py_module = 0; py_name = __Pyx_PyIdentifier_FromString(name); if (!py_name) goto bad; py_module = PyImport_Import(py_name); Py_DECREF(py_name); return py_module; bad: Py_XDECREF(py_name); return 0; } #endif #ifndef __PYX_HAVE_RT_ImportType #define __PYX_HAVE_RT_ImportType static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict) { PyObject *py_module = 0; PyObject *result = 0; PyObject *py_name = 0; char warning[200]; Py_ssize_t basicsize; #ifdef Py_LIMITED_API PyObject *py_basicsize; #endif py_module = __Pyx_ImportModule(module_name); if (!py_module) goto bad; py_name = __Pyx_PyIdentifier_FromString(class_name); if (!py_name) goto bad; result = PyObject_GetAttr(py_module, py_name); Py_DECREF(py_name); py_name = 0; Py_DECREF(py_module); py_module = 0; if (!result) goto bad; if (!PyType_Check(result)) { PyErr_Format(PyExc_TypeError, "%.200s.%.200s is not a type object", module_name, class_name); goto bad; } #ifndef Py_LIMITED_API basicsize = ((PyTypeObject *)result)->tp_basicsize; #else py_basicsize = PyObject_GetAttrString(result, "__basicsize__"); if (!py_basicsize) goto bad; basicsize = PyLong_AsSsize_t(py_basicsize); Py_DECREF(py_basicsize); py_basicsize = 0; if (basicsize == (Py_ssize_t)-1 && PyErr_Occurred()) goto bad; #endif if (!strict && (size_t)basicsize > size) { PyOS_snprintf(warning, sizeof(warning), "%s.%s size changed, may indicate binary incompatibility", module_name, class_name); if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad; } else if ((size_t)basicsize != size) { PyErr_Format(PyExc_ValueError, "%.200s.%.200s has the wrong size, try recompiling", module_name, class_name); goto bad; } return (PyTypeObject *)result; bad: Py_XDECREF(py_module); Py_XDECREF(result); return NULL; } #endif #ifndef __PYX_HAVE_RT_ImportFunction #define __PYX_HAVE_RT_ImportFunction static int __Pyx_ImportFunction(PyObject *module, const char *funcname, void (**f)(void), const char *sig) { 
PyObject *d = 0; PyObject *cobj = 0; union { void (*fp)(void); void *p; } tmp; d = PyObject_GetAttrString(module, (char *)"__pyx_capi__"); if (!d) goto bad; cobj = PyDict_GetItemString(d, funcname); if (!cobj) { PyErr_Format(PyExc_ImportError, "%.200s does not export expected C function %.200s", PyModule_GetName(module), funcname); goto bad; } #if PY_VERSION_HEX >= 0x02070000 if (!PyCapsule_IsValid(cobj, sig)) { PyErr_Format(PyExc_TypeError, "C function %.200s.%.200s has wrong signature (expected %.500s, got %.500s)", PyModule_GetName(module), funcname, sig, PyCapsule_GetName(cobj)); goto bad; } tmp.p = PyCapsule_GetPointer(cobj, sig); #else {const char *desc, *s1, *s2; desc = (const char *)PyCObject_GetDesc(cobj); if (!desc) goto bad; s1 = desc; s2 = sig; while (*s1 != '\0' && *s1 == *s2) { s1++; s2++; } if (*s1 != *s2) { PyErr_Format(PyExc_TypeError, "C function %.200s.%.200s has wrong signature (expected %.500s, got %.500s)", PyModule_GetName(module), funcname, sig, desc); goto bad; } tmp.p = PyCObject_AsVoidPtr(cobj);} #endif *f = tmp.fp; if (!(*f)) goto bad; Py_DECREF(d); return 0; bad: Py_XDECREF(d); return -1; } #endif static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { while (t->p) { #if PY_MAJOR_VERSION < 3 if (t->is_unicode) { *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); } else if (t->intern) { *t->p = PyString_InternFromString(t->s); } else { *t->p = PyString_FromStringAndSize(t->s, t->n - 1); } #else if (t->is_unicode | t->is_str) { if (t->intern) { *t->p = PyUnicode_InternFromString(t->s); } else if (t->encoding) { *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); } else { *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); } } else { *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); } #endif if (!*t->p) return -1; ++t; } return 0; } static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) { return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str)); } static CYTHON_INLINE char* __Pyx_PyObject_AsString(PyObject* o) { Py_ssize_t ignore; return __Pyx_PyObject_AsStringAndSize(o, &ignore); } static CYTHON_INLINE char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) { #if CYTHON_COMPILING_IN_CPYTHON && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT) if ( #if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII __Pyx_sys_getdefaultencoding_not_ascii && #endif PyUnicode_Check(o)) { #if PY_VERSION_HEX < 0x03030000 char* defenc_c; PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL); if (!defenc) return NULL; defenc_c = PyBytes_AS_STRING(defenc); #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII { char* end = defenc_c + PyBytes_GET_SIZE(defenc); char* c; for (c = defenc_c; c < end; c++) { if ((unsigned char) (*c) >= 128) { PyUnicode_AsASCIIString(o); return NULL; } } } #endif *length = PyBytes_GET_SIZE(defenc); return defenc_c; #else if (__Pyx_PyUnicode_READY(o) == -1) return NULL; #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII if (PyUnicode_IS_ASCII(o)) { *length = PyUnicode_GET_LENGTH(o); return PyUnicode_AsUTF8(o); } else { PyUnicode_AsASCIIString(o); return NULL; } #else return PyUnicode_AsUTF8AndSize(o, length); #endif #endif } else #endif #if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE)) if (PyByteArray_Check(o)) { *length = PyByteArray_GET_SIZE(o); return PyByteArray_AS_STRING(o); } else #endif { char* result; int r = PyBytes_AsStringAndSize(o, &result, length); if (unlikely(r < 0)) { return NULL; } else { 
return result;
        }
    }
}
static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) {
    int is_true = x == Py_True;
    if (is_true | (x == Py_False) | (x == Py_None))
        return is_true;
    else
        return PyObject_IsTrue(x);
}
static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x) {
    PyNumberMethods *m;
    const char *name = NULL;
    PyObject *res = NULL;
#if PY_MAJOR_VERSION < 3
    if (PyInt_Check(x) || PyLong_Check(x))
#else
    if (PyLong_Check(x))
#endif
        return __Pyx_NewRef(x);
    m = Py_TYPE(x)->tp_as_number;
#if PY_MAJOR_VERSION < 3
    if (m && m->nb_int) {
        name = "int";
        res = PyNumber_Int(x);
    } else if (m && m->nb_long) {
        name = "long";
        res = PyNumber_Long(x);
    }
#else
    if (m && m->nb_int) {
        name = "int";
        res = PyNumber_Long(x);
    }
#endif
    if (res) {
#if PY_MAJOR_VERSION < 3
        if (!PyInt_Check(res) && !PyLong_Check(res)) {
#else
        if (!PyLong_Check(res)) {
#endif
            PyErr_Format(PyExc_TypeError,
                         "__%.4s__ returned non-%.4s (type %.200s)",
                         name, name, Py_TYPE(res)->tp_name);
            Py_DECREF(res);
            return NULL;
        }
    } else if (!PyErr_Occurred()) {
        PyErr_SetString(PyExc_TypeError,
                        "an integer is required");
    }
    return res;
}
static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) {
    Py_ssize_t ival;
    PyObject *x;
#if PY_MAJOR_VERSION < 3
    if (likely(PyInt_CheckExact(b))) {
        if (sizeof(Py_ssize_t) >= sizeof(long))
            return PyInt_AS_LONG(b);
        else
            return PyInt_AsSsize_t(b);
    }
#endif
    if (likely(PyLong_CheckExact(b))) {
#if CYTHON_USE_PYLONG_INTERNALS
        const digit* digits = ((PyLongObject*)b)->ob_digit;
        const Py_ssize_t size = Py_SIZE(b);
        if (likely(__Pyx_sst_abs(size) <= 1)) {
            ival = likely(size) ? digits[0] : 0;
            if (size == -1) ival = -ival;
            return ival;
        } else {
            switch (size) {
                case 2:
                    if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) {
                        return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | digits[0]));
                    }
                    break;
                case -2:
                    if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) {
                        return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | digits[0]));
                    }
                    break;
                case 3:
                    if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) {
                        return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | digits[1]) << PyLong_SHIFT) | digits[0]));
                    }
                    break;
                case -3:
                    if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) {
                        return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | digits[1]) << PyLong_SHIFT) | digits[0]));
                    }
                    break;
                case 4:
                    if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) {
                        return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | digits[2]) << PyLong_SHIFT) | digits[1]) << PyLong_SHIFT) | digits[0]));
                    }
                    break;
                case -4:
                    if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) {
                        return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | digits[2]) << PyLong_SHIFT) | digits[1]) << PyLong_SHIFT) | digits[0]));
                    }
                    break;
            }
        }
#endif
        return PyLong_AsSsize_t(b);
    }
    x = PyNumber_Index(b);
    if (!x) return -1;
    ival = PyInt_AsSsize_t(x);
    Py_DECREF(x);
    return ival;
}
static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) {
    return PyInt_FromSize_t(ival);
}
#endif /* Py_PYTHON_H */
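The __PYX__VERIFY_RETURN_INT macro defined above guards every narrowing conversion in the __Pyx_PyInt_As_* helpers with a cast round trip: the wide value is cast to the target type and back, and any change means the value does not fit, so the overflow path is taken. Below is a minimal self-contained C++ sketch of just that check; the helper name fits_in_int is invented for illustration and is not part of the generated module.

// Standalone illustration of the cast-and-compare overflow test used by
// __PYX__VERIFY_RETURN_INT (illustrative sketch, not generated code).
#include <climits>
#include <iostream>

// True when `value` survives a round trip through `int` unchanged,
// i.e. the narrowing conversion would be lossless.
static bool fits_in_int(long long value) {
    return value == static_cast<long long>(static_cast<int>(value));
}

int main() {
    const long long small = 12345;
    const long long too_big = static_cast<long long>(INT_MAX) + 1;
    std::cout << small << " fits in int: " << fits_in_int(small) << '\n';     // prints 1
    std::cout << too_big << " fits in int: " << fits_in_int(too_big) << '\n'; // prints 0
    return 0;
}

In the generated code the macro additionally distinguishes signed and unsigned targets and the exception-signaling (func_type)-1 return, but the fit test itself is this comparison.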
residualbased_linear_strategy.h
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ ` // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Riccardo Rossi // // #if !defined(KRATOS_RESIDUALBASED_LINEAR_STRATEGY ) #define KRATOS_RESIDUALBASED_LINEAR_STRATEGY // System includes // External includes // Project includes #include "includes/define.h" #include "solving_strategies/strategies/solving_strategy.h" #include "utilities/builtin_timer.h" //default builder and solver #include "solving_strategies/builder_and_solvers/builder_and_solver.h" #include "solving_strategies/builder_and_solvers/residualbased_block_builder_and_solver.h" namespace Kratos { ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@} ///@name Kratos Classes ///@{ /** * @class ResidualBasedLinearStrategy * @ingroup KratosCore * @brief This is a very simple strategy to solve linearly the problem * @details As a linear strategy the check on the convergence is not done and just one non linear iteration will be performed * @author Riccardo Rossi */ template<class TSparseSpace, class TDenseSpace, //= DenseSpace<double>, class TLinearSolver //= LinearSolver<TSparseSpace,TDenseSpace> > class ResidualBasedLinearStrategy : public SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver> { public: ///@name Type Definitions */ ///@{ /** Counted pointer of ClassName */ KRATOS_CLASS_POINTER_DEFINITION(ResidualBasedLinearStrategy); typedef SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver> BaseType; typedef typename BaseType::TDataType TDataType; typedef TSparseSpace SparseSpaceType; typedef typename BaseType::TSchemeType TSchemeType; typedef typename BaseType::TBuilderAndSolverType TBuilderAndSolverType; typedef typename BaseType::DofsArrayType DofsArrayType; typedef typename BaseType::TSystemMatrixType TSystemMatrixType; typedef typename BaseType::TSystemVectorType TSystemVectorType; typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType; typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType; typedef typename BaseType::TSystemMatrixPointerType TSystemMatrixPointerType; typedef typename BaseType::TSystemVectorPointerType TSystemVectorPointerType; ///@} ///@name Life Cycle ///@{ /** * Default constructor * @param rModelPart The model part of the problem * @param pScheme The integration scheme * @param pNewLinearSolver The linear solver employed * @param CalculateReactionFlag The flag for the reaction calculation * @param ReformDofSetAtEachStep The flag that allows to compute the modification of the DOF * @param CalculateNormDxFlag The flag sets if the norm of Dx is computed * @param MoveMeshFlag The flag that allows to move the mesh */ ResidualBasedLinearStrategy( ModelPart& rModelPart, typename TSchemeType::Pointer pScheme, typename TLinearSolver::Pointer pNewLinearSolver, bool CalculateReactionFlag = false, bool ReformDofSetAtEachStep = false, bool CalculateNormDxFlag = false, bool MoveMeshFlag = false ) : SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(rModelPart, MoveMeshFlag) { KRATOS_TRY mCalculateReactionsFlag = CalculateReactionFlag; mReformDofSetAtEachStep = ReformDofSetAtEachStep; mCalculateNormDxFlag = CalculateNormDxFlag; // Saving the scheme mpScheme = pScheme; // Saving the linear solver mpLinearSolver = pNewLinearSolver; // Setting up the default builder and solver mpBuilderAndSolver = typename TBuilderAndSolverType::Pointer ( new 
ResidualBasedBlockBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver > (mpLinearSolver) ); // Set flag to start correcty the calculations mSolutionStepIsInitialized = false; mInitializeWasPerformed = false; // Tells to the builder and solver if the reactions have to be Calculated or not GetBuilderAndSolver()->SetCalculateReactionsFlag(mCalculateReactionsFlag); // Tells to the Builder And Solver if the system matrix and vectors need to //be reshaped at each step or not GetBuilderAndSolver()->SetReshapeMatrixFlag(mReformDofSetAtEachStep); // Set EchoLevel to the default value (only time is displayed) this->SetEchoLevel(1); // By default the matrices are rebuilt at each solution step BaseType::SetRebuildLevel(1); KRATOS_CATCH("") } /** * Constructor specifying the builder and solver * @param rModelPart The model part of the problem * @param pScheme The integration scheme * @param pNewLinearSolver The linear solver employed * @param pNewBuilderAndSolver The builder and solver employed * @param CalculateReactionFlag The flag for the reaction calculation * @param ReformDofSetAtEachStep The flag that allows to compute the modification of the DOF * @param CalculateNormDxFlag The flag sets if the norm of Dx is computed * @param MoveMeshFlag The flag that allows to move the mesh */ ResidualBasedLinearStrategy( ModelPart& rModelPart, typename TSchemeType::Pointer pScheme, typename TLinearSolver::Pointer pNewLinearSolver, typename TBuilderAndSolverType::Pointer pNewBuilderAndSolver, bool CalculateReactionFlag = false, bool ReformDofSetAtEachStep = false, bool CalculateNormDxFlag = false, bool MoveMeshFlag = false ) : SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(rModelPart, MoveMeshFlag) { KRATOS_TRY mCalculateReactionsFlag = CalculateReactionFlag; mReformDofSetAtEachStep = ReformDofSetAtEachStep; mCalculateNormDxFlag = CalculateNormDxFlag; // Saving the scheme mpScheme = pScheme; // Saving the linear solver mpLinearSolver = pNewLinearSolver; // Setting up the builder and solver mpBuilderAndSolver = pNewBuilderAndSolver; // Set flag to start correcty the calculations mSolutionStepIsInitialized = false; mInitializeWasPerformed = false; // Tells to the builder and solver if the reactions have to be Calculated or not GetBuilderAndSolver()->SetCalculateReactionsFlag(mCalculateReactionsFlag); // Tells to the Builder And Solver if the system matrix and vectors need to //be reshaped at each step or not GetBuilderAndSolver()->SetReshapeMatrixFlag(mReformDofSetAtEachStep); //set EchoLevel to the default value (only time is displayed) this->SetEchoLevel(1); // By default the matrices are rebuilt at each solution step BaseType::SetRebuildLevel(1); KRATOS_CATCH("") } /** * @brief Destructor. * @details In trilinos third party library, the linear solver's preconditioner should be freed before the system matrix. We control the deallocation order with Clear(). */ ~ResidualBasedLinearStrategy() override { // If the linear solver has not been deallocated, clean it before // deallocating mpA. This prevents a memory error with the the ML // solver (which holds a reference to it). auto p_linear_solver = GetBuilderAndSolver()->GetLinearSystemSolver(); if (p_linear_solver != nullptr) p_linear_solver->Clear(); // Deallocating system vectors to avoid errors in MPI. Clear calls // TrilinosSpace::Clear for the vectors, which preserves the Map of // current vectors, performing MPI calls in the process. 
Due to the // way Python garbage collection works, this may happen after // MPI_Finalize has already been called and is an error. Resetting // the pointers here prevents Clear from operating with the // (now deallocated) vectors. mpA.reset(); mpDx.reset(); mpb.reset(); this->Clear(); } /** * @brief Set method for the time scheme * @param pScheme The pointer to the time scheme considered */ void SetScheme(typename TSchemeType::Pointer pScheme) { mpScheme = pScheme; }; /** * @brief Get method for the time scheme * @return mpScheme: The pointer to the time scheme considered */ typename TSchemeType::Pointer GetScheme() { return mpScheme; }; /** * @brief Set method for the builder and solver * @param pNewBuilderAndSolver The pointer to the builder and solver considered */ void SetBuilderAndSolver(typename TBuilderAndSolverType::Pointer pNewBuilderAndSolver) { mpBuilderAndSolver = pNewBuilderAndSolver; }; /** * @brief Get method for the builder and solver * @return mpBuilderAndSolver: The pointer to the builder and solver considered */ typename TBuilderAndSolverType::Pointer GetBuilderAndSolver() { return mpBuilderAndSolver; }; /** * @brief This method sets the flag mCalculateReactionsFlag * @param CalculateReactionsFlag The flag that tells if the reactions are computed */ void SetCalculateReactionsFlag(bool CalculateReactionsFlag) { mCalculateReactionsFlag = CalculateReactionsFlag; GetBuilderAndSolver()->SetCalculateReactionsFlag(mCalculateReactionsFlag); } /** * @brief This method returns the flag mCalculateReactionsFlag * @return The flag that tells if the reactions are computed */ bool GetCalculateReactionsFlag() { return mCalculateReactionsFlag; } /** * @brief This method sets the flag mReformDofSetAtEachStep * @param Flag The flag that tells if each time step the system is rebuilt */ void SetReformDofSetAtEachStepFlag(bool Flag) { mReformDofSetAtEachStep = Flag; GetBuilderAndSolver()->SetReshapeMatrixFlag(mReformDofSetAtEachStep); } /** * @brief This method returns the flag mReformDofSetAtEachStep * @return The flag that tells if each time step the system is rebuilt */ bool GetReformDofSetAtEachStepFlag() { return mReformDofSetAtEachStep; } /** * @brief It sets the level of echo for the solving strategy * @param Level The level to set * @details The different levels of echo are: * - 0: Mute... no echo at all * - 1: Printing time and basic informations * - 2: Printing linear solver data * - 3: Print of debug informations: Echo of stiffness matrix, Dx, b... */ void SetEchoLevel(int Level) override { BaseType::SetEchoLevel(Level); GetBuilderAndSolver()->SetEchoLevel(Level); } //********************************************************************************* /**OPERATIONS ACCESSIBLE FROM THE INPUT:*/ /** * @brief Operation to predict the solution ... 
if it is not called a trivial predictor is used in which the values of the solution step of interest are assumed equal to the old values */ void Predict() override { KRATOS_TRY //OPERATIONS THAT SHOULD BE DONE ONCE - internal check to avoid repetitions //if the operations needed were already performed this does nothing if(mInitializeWasPerformed == false) Initialize(); //initialize solution step if (mSolutionStepIsInitialized == false) InitializeSolutionStep(); TSystemMatrixType& rA = *mpA; TSystemVectorType& rDx = *mpDx; TSystemVectorType& rb = *mpb; DofsArrayType& r_dof_set = GetBuilderAndSolver()->GetDofSet(); this->GetScheme()->Predict(BaseType::GetModelPart(), r_dof_set, rA, rDx, rb); if(BaseType::GetModelPart().MasterSlaveConstraints().size() != 0) { const auto& rProcessInfo = BaseType::GetModelPart().GetProcessInfo(); auto it_begin = BaseType::GetModelPart().MasterSlaveConstraints().begin(); #pragma omp parallel for firstprivate(it_begin) for(int i=0; i<static_cast<int>(BaseType::GetModelPart().MasterSlaveConstraints().size()); ++i) (it_begin+i)->ResetSlaveDofs(rProcessInfo); #pragma omp parallel for firstprivate(it_begin) for(int i=0; i<static_cast<int>(BaseType::GetModelPart().MasterSlaveConstraints().size()); ++i) (it_begin+i)->Apply(rProcessInfo); //the following is needed since we need to eventually compute time derivatives after applying //Master slave relations TSparseSpace::SetToZero(rDx); this->GetScheme()->Update(BaseType::GetModelPart(), r_dof_set, rA, rDx, rb); } if (BaseType::MoveMeshFlag() == true) BaseType::MoveMesh(); KRATOS_CATCH("") } /** * @brief Initialization of member variables and prior operations */ void Initialize() override { KRATOS_TRY if (mInitializeWasPerformed == false) { //pointers needed in the solution typename TSchemeType::Pointer p_scheme = GetScheme(); //Initialize The Scheme - OPERATIONS TO BE DONE ONCE if (p_scheme->SchemeIsInitialized() == false) p_scheme->Initialize(BaseType::GetModelPart()); //Initialize The Elements - OPERATIONS TO BE DONE ONCE if (p_scheme->ElementsAreInitialized() == false) p_scheme->InitializeElements(BaseType::GetModelPart()); //Initialize The Conditions - OPERATIONS TO BE DONE ONCE if (p_scheme->ConditionsAreInitialized() == false) p_scheme->InitializeConditions(BaseType::GetModelPart()); mInitializeWasPerformed = true; } KRATOS_CATCH("") } /** * @brief The problem of interest is solved * @details a double containing norm(Dx) is returned if CalculateNormDxFlag == true, else 0 is returned * @return norm(Dx) */ double Solve() override { BaseType::Solve(); //calculate if needed the norm of Dx double norm_dx = 0.00; if (mCalculateNormDxFlag == true) norm_dx = TSparseSpace::TwoNorm(*mpDx); return norm_dx; } /** * @brief Clears the internal storage * @note NULL could be changed to nullptr in the future (c++11) */ void Clear() override { KRATOS_TRY; // If the preconditioner is saved between solves, it // should be cleared here. GetBuilderAndSolver()->GetLinearSystemSolver()->Clear(); if (mpA != NULL) SparseSpaceType::Clear(mpA); if (mpDx != NULL) SparseSpaceType::Clear(mpDx); if (mpb != NULL) SparseSpaceType::Clear(mpb); // Setting to zero the internal flag to ensure that the dof sets are recalculated GetBuilderAndSolver()->SetDofSetIsInitializedFlag(false); GetBuilderAndSolver()->Clear(); GetScheme()->Clear(); mInitializeWasPerformed = false; mSolutionStepIsInitialized = false; KRATOS_CATCH(""); } /** * @brief This operations should be called before printing the results when non trivial results (e.g. 
stresses) need to be calculated given the solution of the step *@details This operations should be called only when needed, before printing as it can involve a non negligible cost */ void CalculateOutputData() override { TSystemMatrixType& rA = *mpA; TSystemVectorType& rDx = *mpDx; TSystemVectorType& rb = *mpb; GetScheme()->CalculateOutputData(BaseType::GetModelPart(), GetBuilderAndSolver()->GetDofSet(), rA, rDx, rb); } /** * @brief Performs all the required operations that should be done (for each step) before solving the solution step. * @details A member variable should be used as a flag to make sure this function is called only once per step. * @todo Boost dependencies should be replaced by std equivalent */ void InitializeSolutionStep() override { KRATOS_TRY if (mSolutionStepIsInitialized == false) { //pointers needed in the solution typename TSchemeType::Pointer p_scheme = GetScheme(); typename TBuilderAndSolverType::Pointer p_builder_and_solver = GetBuilderAndSolver(); const int rank = BaseType::GetModelPart().GetCommunicator().MyPID(); //set up the system, operation performed just once unless it is required //to reform the dof set at each iteration BuiltinTimer system_construction_time; if (p_builder_and_solver->GetDofSetIsInitializedFlag() == false || mReformDofSetAtEachStep == true) { //setting up the list of the DOFs to be solved BuiltinTimer setup_dofs_time; p_builder_and_solver->SetUpDofSet(p_scheme, BaseType::GetModelPart()); KRATOS_INFO_IF("Setup Dofs Time", BaseType::GetEchoLevel() > 0 && rank == 0) << setup_dofs_time.ElapsedSeconds() << std::endl; //shaping correctly the system BuiltinTimer setup_system_time; p_builder_and_solver->SetUpSystem(BaseType::GetModelPart()); KRATOS_INFO_IF("Setup System Time", BaseType::GetEchoLevel() > 0 && rank == 0) << setup_system_time.ElapsedSeconds() << std::endl; //setting up the Vectors involved to the correct size BuiltinTimer system_matrix_resize_time; p_builder_and_solver->ResizeAndInitializeVectors(p_scheme, mpA, mpDx, mpb, BaseType::GetModelPart()); KRATOS_INFO_IF("System Matrix Resize Time", BaseType::GetEchoLevel() > 0 && rank == 0) << system_matrix_resize_time.ElapsedSeconds() << std::endl; } KRATOS_INFO_IF("System Construction Time", BaseType::GetEchoLevel() > 0 && rank == 0) << system_construction_time.ElapsedSeconds() << std::endl; TSystemMatrixType& rA = *mpA; TSystemVectorType& rDx = *mpDx; TSystemVectorType& rb = *mpb; //initial operations ... things that are constant over the Solution Step p_builder_and_solver->InitializeSolutionStep(BaseType::GetModelPart(), rA, rDx, rb); //initial operations ... things that are constant over the Solution Step p_scheme->InitializeSolutionStep(BaseType::GetModelPart(), rA, rDx, rb); mSolutionStepIsInitialized = true; } KRATOS_CATCH("") } /** * @brief Performs all the required operations that should be done (for each step) after solving the solution step. * @details A member variable should be used as a flag to make sure this function is called only once per step. 
*/ void FinalizeSolutionStep() override { KRATOS_TRY; typename TSchemeType::Pointer p_scheme = GetScheme(); typename TBuilderAndSolverType::Pointer p_builder_and_solver = GetBuilderAndSolver(); TSystemMatrixType &rA = *mpA; TSystemVectorType &rDx = *mpDx; TSystemVectorType &rb = *mpb; //Finalisation of the solution step, //operations to be done after achieving convergence, for example the //Final Residual Vector (mb) has to be saved in there //to avoid error accumulation p_scheme->FinalizeSolutionStep(BaseType::GetModelPart(), rA, rDx, rb); p_builder_and_solver->FinalizeSolutionStep(BaseType::GetModelPart(), rA, rDx, rb); //Cleaning memory after the solution p_scheme->Clean(); //reset flags for next step mSolutionStepIsInitialized = false; //deallocate the systemvectors if needed if (mReformDofSetAtEachStep == true) { SparseSpaceType::Clear(mpA); SparseSpaceType::Clear(mpDx); SparseSpaceType::Clear(mpb); this->Clear(); } KRATOS_CATCH(""); } /** * @brief Solves the current step. This function returns true if a solution has been found, false otherwise. */ bool SolveSolutionStep() override { //pointers needed in the solution typename TSchemeType::Pointer p_scheme = GetScheme(); typename TBuilderAndSolverType::Pointer p_builder_and_solver = GetBuilderAndSolver(); TSystemMatrixType& rA = *mpA; TSystemVectorType& rDx = *mpDx; TSystemVectorType& rb = *mpb; p_scheme->InitializeNonLinIteration(BaseType::GetModelPart(), rA, rDx, rb); if (BaseType::mRebuildLevel > 0 || BaseType::mStiffnessMatrixIsBuilt == false) { TSparseSpace::SetToZero(rA); TSparseSpace::SetToZero(rDx); TSparseSpace::SetToZero(rb); // passing smart pointers instead of references here // to prevent dangling pointer to system matrix when // reusing ml preconditioners in the trilinos tpl p_builder_and_solver->BuildAndSolve(p_scheme, BaseType::GetModelPart(), rA, rDx, rb); BaseType::mStiffnessMatrixIsBuilt = true; } else { TSparseSpace::SetToZero(rDx); TSparseSpace::SetToZero(rb); p_builder_and_solver->BuildRHSAndSolve(p_scheme, BaseType::GetModelPart(), rA, rDx, rb); } // Debugging info EchoInfo(); //update results DofsArrayType& r_dof_set = p_builder_and_solver->GetDofSet(); p_scheme->Update(BaseType::GetModelPart(), r_dof_set, rA, rDx, rb); //move the mesh if needed if (BaseType::MoveMeshFlag() == true) BaseType::MoveMesh(); p_scheme->FinalizeNonLinIteration(BaseType::GetModelPart(), rA, rDx, rb); // Calculate reactions if required if (mCalculateReactionsFlag == true) p_builder_and_solver->CalculateReactions(p_scheme, BaseType::GetModelPart(), rA, rDx, rb); return true; } /** * @brief This method returns the LHS matrix * @return The LHS matrix */ TSystemMatrixType& GetSystemMatrix() { TSystemMatrixType& mA = *mpA; return mA; } /** * @brief This method returns the RHS vector * @return The RHS vector */ TSystemVectorType& GetSystemVector() { TSystemVectorType& mb = *mpb; return mb; } /** * @brief This method returns the solution vector * @return The Dx vector */ TSystemVectorType& GetSolutionVector() { TSystemVectorType& mDx = *mpDx; return mDx; } /** * @brief This method returns the residual norm * @return The residual norm */ double GetResidualNorm() override { if (TSparseSpace::Size(*mpb) != 0) return TSparseSpace::TwoNorm(*mpb); else return 0.0; } /** * @brief Function to perform expensive checks. * @details It is designed to be called ONCE to verify that the input is correct. 
*/ int Check() override { KRATOS_TRY BaseType::Check(); GetBuilderAndSolver()->Check(BaseType::GetModelPart()); GetScheme()->Check(BaseType::GetModelPart()); return 0; KRATOS_CATCH("") } ///@} ///@name Operators ///@{ ///@} ///@name Operations ///@{ ///@} ///@name Access ///@{ ///@} ///@name Inquiry ///@{ ///@} ///@name Input and output ///@{ /// Turn back information as a string. std::string Info() const override { return "ResidualBasedLinearStrategy"; } /// Print information about this object. void PrintInfo(std::ostream& rOStream) const override { rOStream << Info(); } /// Print object's data. void PrintData(std::ostream& rOStream) const override { rOStream << Info(); } ///@} ///@name Friends ///@{ ///@} protected: ///@name Protected static Member Variables ///@{ ///@} ///@name Protected member Variables ///@{ ///@} ///@name Protected Operators ///@{ ///@} ///@name Protected Operations ///@{ ///@} ///@name Protected Access ///@{ ///@} ///@name Protected Inquiry ///@{ ///@} ///@name Protected LifeCycle ///@{ ///@} private: ///@name Static Member Variables ///@{ ///@} ///@name Member Variables ///@{ typename TLinearSolver::Pointer mpLinearSolver; /// The pointer to the linear solver considered typename TSchemeType::Pointer mpScheme; /// The pointer to the time scheme employed typename TBuilderAndSolverType::Pointer mpBuilderAndSolver; /// The pointer to the builder and solver employed TSystemVectorPointerType mpDx; /// The incremement in the solution TSystemVectorPointerType mpb; /// The RHS vector of the system of equations TSystemMatrixPointerType mpA; /// The LHS matrix of the system of equations /** * @brief Flag telling if it is needed to reform the DofSet at each solution step or if it is possible to form it just once * @details Default = false - true : Reforme at each time step - false : Form just one (more efficient) */ bool mReformDofSetAtEachStep; bool mCalculateNormDxFlag; /// Calculates if required the norm of the correction term Dx /** * @brief Flag telling if it is needed or not to compute the reactions * @details default = true */ bool mCalculateReactionsFlag; bool mSolutionStepIsInitialized; /// Flag to set as initialized the solution step bool mInitializeWasPerformed; /// Flag to set as initialized the strategy ///@} ///@name Private Operators*/ ///@{ /** * @brief This method returns the components of the system of equations depending of the echo level */ virtual void EchoInfo() { TSystemMatrixType& rA = *mpA; TSystemVectorType& rDx = *mpDx; TSystemVectorType& rb = *mpb; if (BaseType::GetEchoLevel() == 3) //if it is needed to print the debug info { KRATOS_INFO("LHS") << "SystemMatrix = " << rA << std::endl; KRATOS_INFO("Dx") << "Solution obtained = " << rDx << std::endl; KRATOS_INFO("RHS") << "RHS = " << rb << std::endl; } if (this->GetEchoLevel() == 4) //print to matrix market file { std::stringstream matrix_market_name; matrix_market_name << "A_" << BaseType::GetModelPart().GetProcessInfo()[TIME] << ".mm"; TSparseSpace::WriteMatrixMarketMatrix((char*) (matrix_market_name.str()).c_str(), rA, false); std::stringstream matrix_market_vectname; matrix_market_vectname << "b_" << BaseType::GetModelPart().GetProcessInfo()[TIME] << ".mm.rhs"; TSparseSpace::WriteMatrixMarketVector((char*) (matrix_market_vectname.str()).c_str(), rb); } } ///@} ///@name Private Operations*/ ///@{ ///@} ///@name Private Access */ ///@{ ///@} ///@name Private Inquiry */ ///@{ ///@} ///@name Un accessible methods */ ///@{ /** Copy constructor. 
*/ ResidualBasedLinearStrategy(const ResidualBasedLinearStrategy& Other); ///@} }; /* Class ResidualBasedLinearStrategy */ ///@} ///@name Type Definitions */ ///@{ ///@} } /* namespace Kratos.*/ #endif /* KRATOS_RESIDUALBASED_LINEAR_STRATEGY defined */
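// A minimal usage sketch for the strategy above, assuming the usual Kratos
// typedefs, an already-populated ModelPart, and the (model part, scheme,
// linear solver, flags) constructor overload; `model_part`, `p_scheme` and
// `p_solver` are illustrative names, not part of this header, and
// Initialize()/InitializeSolutionStep() are the members declared earlier in
// this class. The per-step call sequence mirrors the functions defined above.
//
//   using SparseSpaceType  = UblasSpace<double, CompressedMatrix, Vector>;
//   using LocalSpaceType   = UblasSpace<double, Matrix, Vector>;
//   using LinearSolverType = LinearSolver<SparseSpaceType, LocalSpaceType>;
//   using StrategyType     = ResidualBasedLinearStrategy<SparseSpaceType,
//                                                        LocalSpaceType,
//                                                        LinearSolverType>;
//
//   StrategyType strategy(model_part, p_scheme, p_solver,
//                         /*CalculateReactionFlag=*/false,
//                         /*ReformDofSetAtEachStep=*/false,
//                         /*CalculateNormDxFlag=*/false,
//                         /*MoveMeshFlag=*/false);
//
//   strategy.Check();                 // expensive one-off input validation
//   strategy.Initialize();
//   strategy.InitializeSolutionStep();
//   strategy.SolveSolutionStep();     // builds (or reuses) A, solves A*Dx = b
//   strategy.FinalizeSolutionStep();  // cleans up; clears the system when
//                                     // mReformDofSetAtEachStep is true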
special_ops.h
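// "Special" transform implementations (Pooling2D, Im2col, Col2Im, Histogram,
// Reverse, SoftMax / LogSoftMax / SoftMaxDerivative, IsMax). Each op provides
// a CPU execSpecial() (OpenMP) and, when compiled with __CUDACC__, a device
// execSpecialCuda() variant.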
#pragma once
#include <ops/ops.h>
#include <loops/reduce.h>
#include <loops/scalar.h>
#include <loops/indexreduce.h>
#include <loops/broadcasting.h>

namespace functions {
    namespace broadcast {
        template <typename T>
        class Broadcast;
    }

    namespace transform {
        template <typename T>
        class Transform;
    }

    namespace scalar {
    }

    namespace reduce {
        template <typename T>
        class ReduceFunction;
    }
}

namespace simdOps {

    template<typename T>
    class Pooling2D {
    public:
        static const bool requiresSpecial = true;

#ifdef __CUDACC__
        inline __host__ __device__
#elif defined(__GNUC__)

#endif
        static int outSize(int size, int k, int s, int p, bool coverAll) {
            if (coverAll)
                return (size + p * 2 - k + s - 1) / s + 1;
            else
                return (size + p * 2 - k) / s + 1;
        }

#ifdef __CUDACC__
        /**
         * Based on: https://github.com/pjreddie/darknet/blob/master/src/im2col_kernels.cu
         */
        static inline __device__ void execSpecialCuda(
                T *dx, Nd4jLong *xShapeBuffer,
                T *result, Nd4jLong *resultShapeBuffer,
                T *extraParams,
                int *allocationPointer, T *reductionPointer,
                UnifiedSharedMemory *manager, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {

            __shared__ int kH;
            __shared__ int kW;
            __shared__ int sH;
            __shared__ int sW;
            __shared__ int pH;
            __shared__ int pW;
            __shared__ int dH;
            __shared__ int dW;
            __shared__ int poolingMode;
            __shared__ T extraParam0;

            __shared__ int batchSize;
            __shared__ int inChannels;
            __shared__ int outH;
            __shared__ int outW;
            __shared__ int inH;
            __shared__ int inW;

            //__shared__ int *strideIn;
            //__shared__ int *strideOut;
            __shared__ int strideB;
            __shared__ int strideC;
            __shared__ int strideY;
            __shared__ int strideX;

            __shared__ int strideOB;
            __shared__ int strideOC;
            __shared__ int strideOY;
            __shared__ int strideOX;

            __shared__ int length;
            __shared__ int kHEff;
            __shared__ int kWEff;
            __shared__ bool fOrder;

            if (threadIdx.x == 0) {
                kH = (int)extraParams[0];
                kW = (int)extraParams[1];
                sH = (int)extraParams[2];
                sW = (int)extraParams[3];
                pH = (int)extraParams[4];
                pW = (int)extraParams[5];
                dH = (int)extraParams[6];           //Dilation, height dimension
                dW = (int)extraParams[7];           //Dilation, width dimension
                poolingMode = (int)extraParams[9];
                extraParam0 = extraParams[10];

                batchSize = shape::sizeAt(xShapeBuffer, 0);
                inChannels = shape::sizeAt(xShapeBuffer, 1);
                outH = shape::sizeAt(resultShapeBuffer, 2);
                outW = shape::sizeAt(resultShapeBuffer, 3);
                inH = shape::sizeAt(xShapeBuffer, 2);
                inW = shape::sizeAt(xShapeBuffer, 3);

                strideB = shape::stride(xShapeBuffer)[0];
                strideC = shape::stride(xShapeBuffer)[1];
                strideY = shape::stride(xShapeBuffer)[2];
                strideX = shape::stride(xShapeBuffer)[3];

                strideOB = shape::stride(resultShapeBuffer)[0];
                strideOC = shape::stride(resultShapeBuffer)[1];
                strideOY = shape::stride(resultShapeBuffer)[2];
                strideOX = shape::stride(resultShapeBuffer)[3];

                length = shape::length(resultShapeBuffer);

                //Replace kernel H/W with *effective* kernel H/W accounting for dilation
                kHEff = kH + (kH-1)*(dH-1);
                kWEff = kW + (kW-1)*(dW-1);

                fOrder = shape::order(resultShapeBuffer) == 'f';

                /*
                if (blockIdx.x == 0) {
                    printf("kH: %i; kW: %i; sH: %i; sW: %i; pH: %i; pW: %i; dH: %i; dW: %i; poolingMode: %i; extraParam0: %f;\n", kH, kW, sH, sW, pH, pW, dH, dW, poolingMode, (float) extraParam0);
                    printf("batchSize: %i; inChannels: %i; outH: %i; outW: %i; inH: %i; inW: %i; strideB: %i; strideC: %i; strideY: %i; strideX: %i;\n", batchSize, inChannels, outH, outW, inH, inW, strideB, strideC, strideY, strideX);
                }
                */
            }
            __syncthreads();

            // Global thread id; the original `blockIdx.x * gridDim.x + threadIdx.x`
            // mixed up the grid size and the block size.
            int tid = blockIdx.x * blockDim.x + threadIdx.x;

            for (int index = tid; index < length; index += blockDim.x * gridDim.x) {
                const int pw =
index % outW; const int ph = (index / outW) % outH; const int c = (index / outW / outH) % inChannels; const int n = index / outW / outH / inChannels; int hstart = sH * ph - pH; int wstart = sW * pw - pW; int hend = hstart + kHEff; int wend = wstart + kWEff; // const int hSO = hstart; // const int hEO = hend; if(hstart < 0){ int f = (int)nd4j::math::nd4j_ceil<T>((T) -hstart / (T)dH); hstart += f * dH; } if(wstart < 0){ int f = (int)nd4j::math::nd4j_ceil<T>((T) -wstart / (T) dW); wstart += f * dW; } if(hend > inH){ int f = (int)nd4j::math::nd4j_ceil<T>((T) (hend-inH) / (T) dH); hend -= f * dH; } if(wend > inW){ int f = (int)nd4j::math::nd4j_ceil<T>((T) (wend-inW) / (T) dW); wend -= f * dW; } int pool_size = (int)(nd4j::math::nd4j_ceil<T>((T) (hend-hstart) / (T) dH) * (int) nd4j::math::nd4j_ceil<T>((T) (wend-wstart) / (T) dW)); //Accounts for dilation T sum = poolingMode == 0 ? (T) -MAX_FLOAT : (T) 0; T *input_slice = dx + (n * strideB + c * strideC); if (poolingMode == 0) { for (int h = hstart; h < hend; h += dH) { for (int w = wstart; w < wend; w += dW) { T v = input_slice[h * strideY + w * strideX]; if (v > sum) sum = v; } } } else if (poolingMode == 1) { for (int h = hstart; h < hend; h += dH) { for (int w = wstart; w < wend; w += dW) { sum += input_slice[h * strideY + w * strideX]; } } } else if (poolingMode == 2) { for (int h = hstart; h < hend; h += dH) { for (int w = wstart; w < wend; w += dW) { sum += nd4j::math::nd4j_pow<T>(nd4j::math::nd4j_abs<T>(input_slice[h * strideY + w * strideX]), extraParam0); } } } T res; if (poolingMode == 0) { res = sum; } else if (poolingMode == 1) { int divide_factor = pool_size; //Case 0: exclude padding if ((int) extraParam0 == 1) //Case 1: include padding divide_factor = kH * kW; res = sum / divide_factor; } else if (poolingMode == 2) { res = nd4j::math::nd4j_pow<T>(sum, (T) 1.0f / extraParam0); } if (!fOrder) { result[index] = res; } else { result[n * strideOB + c * strideOC + pw * strideOX + ph * strideOY] = res; } /* if (index >= 0 && index < 400000) { printf("index: %i; hstart: %i; hend: %i; wstart: %i; wend: %i; ph: %i; pw: %i; hstart_orig: %i; hend_orig: %i;\n", index, hstart, hend, wstart, wend, ph, pw, hSO, hEO); } */ } } #endif static void execSpecial( T *dx, Nd4jLong *xShapeBuffer, T *result, Nd4jLong *resultShapeBuffer, T *extraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { int kH = (int)extraParams[0]; int kW = (int)extraParams[1]; int sH = (int)extraParams[2]; int sW = (int)extraParams[3]; int pH = (int)extraParams[4]; int pW = (int)extraParams[5]; int dH = (int)extraParams[6]; //Dilation, height dimension int dW = (int)extraParams[7]; //Dilation, width dimension int poolingMode = (int)extraParams[9]; T extraParam0 = extraParams[10]; const int kHEff = kH + (kH-1)*(dH-1); const int kWEff = kW + (kW-1)*(dW-1); const int batchSize = (int) shape::sizeAt(xShapeBuffer, 0); const int inChannels = (int) shape::sizeAt(xShapeBuffer, 1); const int outH = (int) shape::sizeAt(resultShapeBuffer, 2); const int outW = (int) shape::sizeAt(resultShapeBuffer, 3); const int inH = (int) shape::sizeAt(xShapeBuffer, 2); const int inW = (int) shape::sizeAt(xShapeBuffer, 3); auto strideIn = shape::stride(xShapeBuffer); auto strideOut = shape::stride(resultShapeBuffer); const bool fOrder = shape::order(resultShapeBuffer) == 'f'; const Nd4jLong zLength = shape::length(resultShapeBuffer); const int zRank = shape::rank(resultShapeBuffer); int indices[6]; int idx = 0; #pragma omp parallel for collapse(2) schedule(guided) shared(indices) for(int k = 0; k < 
inChannels; k++) { for(int p = 0; p < batchSize; p++) { int xx, yy; /* For all output pixels... */ const int _b = p * strideOut[0]; const int _k = k * strideOut[1]; T *ptr_output = result + _b + _k; T *ptr_input = dx + p * strideIn[0] + k * strideIn[1]; for(yy = 0; yy < outH; yy++) { for(xx = 0; xx < outW; xx++) { /* Compute the mean of the input image... */ int hstart = yy * sH - pH; int wstart = xx * sW - pW; int hend = hstart + kHEff; int wend = wstart + kWEff; const int hSO = hstart; const int hEO = hend; if(hstart < 0){ int n = (int)nd4j::math::nd4j_ceil<T>((T) -hstart / ((T)dH)); hstart += n * dH; } if(wstart < 0){ int n = (int)nd4j::math::nd4j_ceil<T>((T) -wstart / ((T)dW)); wstart += n * dW; } if(hend > inH){ int n = (int)nd4j::math::nd4j_ceil<T>((T)(hend-inH)/((T)dH)); hend -= n * dH; } if(wend > inW){ int n = (int)nd4j::math::nd4j_ceil<T>((T)(wend-inW)/((T)dW)); wend -= n * dW; } int pool_size = (int)(nd4j::math::nd4j_ceil<T>((T) (hend-hstart)/((T)dH)) * (int)nd4j::math::nd4j_ceil<T>((T)(wend-wstart)/((T)dW))); //Accounts for dilation T sum = poolingMode == 0 ? (T) -MAX_FLOAT : (T) 0; // we need this only for avg pooling int divide_factor = 0; if (poolingMode == 1) { if ((int) extraParam0 == 0) //Exclude padding divide_factor = pool_size; else if ((int) extraParam0 == 1) //Include padding divide_factor = kH * kW; } long kx, ky; if (poolingMode == 0) { #pragma omp simd reduction(maxT:sum) collapse(2) for (ky = hstart; ky < hend; ky += dH) { for (kx = wstart; kx < wend; kx += dW) if (ptr_input[ky * strideIn[2] + kx * strideIn[3]] > sum) sum = ptr_input[ky * strideIn[2] + kx * strideIn[3]]; } } else if (poolingMode == 1) { #pragma omp simd reduction(sumT:sum) collapse(2) for (ky = hstart; ky < hend; ky += dH) { for (kx = wstart; kx < wend; kx += dW) sum += ptr_input[ky * strideIn[2] + kx * strideIn[3]]; } } else if (poolingMode == 2) { #pragma omp simd reduction(sumT:sum) collapse (2) for (ky = hstart; ky < hend; ky += dH) { for (kx = wstart; kx < wend; kx += dW) sum += nd4j::math::nd4j_pow<T>(nd4j::math::nd4j_abs<T>(ptr_input[ky * strideIn[2] + kx * strideIn[3]]), extraParam0); } } /* Update output */ T res = sum; if (poolingMode == 1) { res /= divide_factor; } else if (poolingMode == 2) res = nd4j::math::nd4j_pow<T>(res, (T) 1.0f / extraParam0); if (!fOrder) { *ptr_output++ = res; } else { result[_b + _k + yy * strideOut[2] + xx * strideOut[3]] = res; } } } } } } op_def static T op(T d1, T *params) { return d1; } /** Calculate buffer offset (like Shape.getOffset) without checking on input for negative indices etc * normally negative indices are bad, OK here because of other checks on input indices * Uses unrolled loop specifically for length 4 */ #ifdef __CUDACC__ inline __host__ __device__ #elif defined(__GNUC__) #endif static int getOffsetUnsafe4(int baseOffset, int *shape, int *stride, int *indices) { int offset = baseOffset; if (shape[0] != 1) offset += indices[0] * stride[0]; if (shape[1] != 1) offset += indices[1] * stride[1]; if (shape[2] != 1) offset += indices[2] * stride[2]; if (shape[3] != 1) offset += indices[3] * stride[3]; return offset; } /** * A version of Shape.getOffset without checking on input for negative indices etc * normally negative indices are bad, OK here because of other checks on input indices * Uses unrolled loop specifically for length 6, where indices[2] and indices[3] are zero (always are here) */ #ifdef __CUDACC__ inline __host__ __device__ #elif defined(__GNUC__) #endif static int getOffsetUnsafe6(int baseOffset, int *shape, int *stride, int 
*indices) { int offset = baseOffset; if (shape[0] != 1) offset += indices[0] * stride[0]; if (shape[1] != 1) offset += indices[1] * stride[1]; if (shape[4] != 1) offset += indices[4] * stride[4]; if (shape[5] != 1) offset += indices[5] * stride[5]; return offset; } }; FORCEINLINE bool is_a_ge_zero_and_a_lt_b(int a, int b) { return static_cast<unsigned>(a) < static_cast<unsigned>(b); } template<typename T> class Im2col { public: static const bool requiresSpecial = true; #ifdef __CUDACC__ inline __host__ __device__ #elif defined(__GNUC__) #endif static int outSize(int size, int k, int s, int p, bool coverAll) { if (coverAll) return (size + p * 2 - k + s - 1) / s + 1; else return (size + p * 2 - k) / s + 1; } #ifdef __CUDACC__ /** * Based on: https://github.com/pjreddie/darknet/blob/master/src/im2col_kernels.cu */ static inline __device__ void execSpecialCuda( T *dx, Nd4jLong *xShapeBuffer, T *result, Nd4jLong *resultShapeBuffer, T *extraParams, int *allocationPointer, T *reductionPointer, UnifiedSharedMemory *manager, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { /*kernel[0], kernel[1], stride[0], stride[1], padding[0], padding[1], 0, false*/ int kernelHeight = (int)extraParams[0]; int kernelWidth = (int)extraParams[1]; int strideY = (int)extraParams[2]; int strideX = (int)extraParams[3]; int padHeight = (int)extraParams[4]; int padWidth = (int)extraParams[5]; int dY = (int)extraParams[6]; //Dilation, height/y dimension int dX = (int)extraParams[7]; //Dilation, width/x dimension int kSize = kernelWidth * kernelHeight; T zeroPadVal = (T)extraParams[9]; //Value to use when value is padding. Usually 0 but not always auto outShape = shape::shapeOf(resultShapeBuffer); auto resultOrder = shape::order(resultShapeBuffer); auto outStride = shape::stride(resultShapeBuffer); auto inShape = shape::shapeOf(xShapeBuffer); auto inStride = shape::stride(xShapeBuffer); int samples = inShape[0]; int depth = inShape[1]; int height = inShape[2]; int width = inShape[3]; int strideex = inStride[0]; int stridech = inStride[1]; int strideh = inStride[2]; int stridew = inStride[3]; // (height + 2 * padHeight - kernelHeight) / strideX + 1; // // (width + 2 * padWidth - kernelWidth) / strideY + 1; // int height_col = outShape[4]; int width_col = outShape[5]; int n = samples * depth * height_col * width_col; /* if (threadIdx.x == 0) printf("Kernel h: [%i], w: [%i]; Col h: [%i], w: [%i]; Stride x: [%i], y: [%i]; Height: [%i], Width: [%i], Depth: [%i], N: [%i], Samples: [%i]\n", kernelHeight, kernelWidth, height_col, width_col, strideX, strideY, height, width, depth, n, samples); */ int index = blockIdx.x * blockDim.x + threadIdx.x; for (; index < n; index += blockDim.x*gridDim.x) { int h_index = index / width_col; int h_col = h_index % height_col; int w_col = index % width_col; int c_im = h_index / height_col; int c_col = c_im * kSize; int depth_im = c_im % depth; int num_im = c_im / depth; int h_offset = h_col * strideY - padHeight; int w_offset = w_col * strideX - padWidth; T* data_col_ptr = result; int i_c = (c_col * height_col + h_col) * width_col + w_col; data_col_ptr += (c_col * height_col + h_col) * width_col + w_col; T* data_im_ptr = dx; data_im_ptr += num_im * strideex + depth_im * stridech + h_offset * strideh + w_offset*stridew; for (int i = 0; i < kernelHeight; ++i) { for (int j = 0; j < kernelWidth; ++j) { int h_im = h_offset + i * dY; int w_im = w_offset + j * dX; int i_f = 0; int i_c_temp = i_c; for (int dim = 5; dim >= 0; dim--) { i_f += (i_c_temp % outShape[dim]) * outStride[dim]; i_c_temp = i_c_temp 
/ outShape[dim]; } if (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width){ result[i_f] = data_im_ptr[i * dY * strideh + j * dX * stridew]; } else result[i_f] = zeroPadVal; //result[i_f] = (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) ? data_im_ptr[i * strideh + j*stridew] : 0; data_col_ptr += height_col * width_col; i_c += height_col * width_col; } } } } #endif static void execSpecial( T *dx, Nd4jLong *xShapeBuffer, T *result, Nd4jLong *resultShapeBuffer, T *extraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { /*kernel[0], kernel[1], stride[0], stride[1], padding[0], padding[1], 0, false*/ T zeroPadVal = (T) 0.0f; int kH = (int)extraParams[0]; int kW = (int)extraParams[1]; int sH = (int)extraParams[2]; int sW = (int)extraParams[3]; int pH = (int)extraParams[4]; int pW = (int)extraParams[5]; int dH = (int)extraParams[6]; //Dilation, height/y dimension int dW = (int)extraParams[7]; //Dilation, width/x dimension auto outShape = shape::shapeOf(resultShapeBuffer); auto outStride = shape::stride(resultShapeBuffer); auto inShape = shape::shapeOf(xShapeBuffer); auto inStride = shape::stride(xShapeBuffer); const int bS = inShape[0]; const int iC = inShape[1]; const int iH = inShape[2]; const int iW = inShape[3]; const int oH = outShape[4]; const int oW = outShape[5]; const int outStride0 = outStride[0]; const int outStride1 = outStride[1]; const int outStride2 = outStride[2]; const int outStride3 = outStride[3]; const int outStride4 = outStride[4]; const int outStride5 = outStride[5]; const int inStride0 = inStride[0]; const int inStride1 = inStride[1]; const int inStride2 = inStride[2]; const int inStride3 = inStride[3]; const T* in0End = dx + inStride1 * iC; const int kRowEnd = -pH + kH * dH; const int kColEnd = -pW + kW * dW; const int oHW = oH * oW; const int inRowEnd = oH * sH; const int inColEnd = oW * sW; int inRowStart, inColStart, inRow, inCol; T *in0, *in1; if (shape::order(xShapeBuffer) == 'c' && shape::order(resultShapeBuffer) == 'c' && shape::strideDescendingCAscendingF(xShapeBuffer) && shape::strideDescendingCAscendingF(resultShapeBuffer)) { #pragma omp parallel for schedule(static) proc_bind(close) private(in0, in1, inRowStart, inColStart, inRow, inCol) for (int b = 0; b < bS; b++) { in0 = dx + (b * inStride0); T *output = result + (b * outStride0); for (int channel = 0; channel < iC; ++channel, in0 += inStride1) { for (int kRow = 0; kRow < kH; kRow++) { inRowStart = -pH + kRow * dH; for (int kCol = 0; kCol < kW; kCol++) { inRow = inRowStart; inColStart = -pW + kCol * dW; for (int outRow = 0; outRow < oH; ++outRow, inRow += sH) { if (!is_a_ge_zero_and_a_lt_b(inRow, iH)) for (int outCol = 0; outCol < oW; ++outCol, ++output) { *output = zeroPadVal; } else { inCol = inColStart; in1 = in0 + inRow * inStride2; for (int outCol = 0; outCol < oW; ++outCol, inCol += sW, ++output) if (is_a_ge_zero_and_a_lt_b(inCol, iW)) *output = *(in1 + inCol * inStride3); else *output = zeroPadVal; } } } } } } } else { T *out0, *out1, *out2, *out3, *out4; #pragma omp parallel for schedule(static) proc_bind(close) private(in0, in1, out0, out1, out2, out3, out4, inRowStart, inColStart, inRow, inCol) for (int b = 0; b < bS; b++) { in0 = dx + (b * inStride0); out0 = result + b * outStride0; for (int channel = 0; channel < iC; ++channel, in0 += inStride1, out0+=outStride1) { out1 = out0; for (int kRow = 0; kRow < kH; kRow++, out1 += outStride2) { out2 = out1; inRowStart = -pH + kRow * dH; for (int kCol = 0; kCol < kW; kCol++, out2 += outStride3) { out3 = out2; inRow = inRowStart; 
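// For this (kRow, kCol) kernel tap, walk the whole output plane via the
// out3/out4 stride pointers. is_a_ge_zero_and_a_lt_b() below folds the
// bounds check (0 <= idx < limit) into a single unsigned compare: a negative
// int wraps to a huge unsigned value, so out-of-image taps compare false and
// are written as zeroPadVal instead of being read.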
inColStart = -pW + kCol * dW; for (int outRow = 0; outRow < oH; ++outRow, inRow += sH, out3 += outStride4) { out4 = out3; if (!is_a_ge_zero_and_a_lt_b(inRow, iH)) for (int outCol = 0; outCol < oW; ++outCol, out4 += outStride5) { *out4 = zeroPadVal; } else { inCol = inColStart; in1 = in0 + inRow * inStride2; for (int outCol = 0; outCol < oW; ++outCol, inCol += sW, out4 += outStride5) { if (is_a_ge_zero_and_a_lt_b(inCol, iW)) *out4 = *(in1 + inCol * inStride3); else *out4 = zeroPadVal; } } } } } } } } } op_def static T op(T d1, T *params) { return d1; } /** Calculate buffer offset (like Shape.getOffset) without checking on input for negative indices etc * normally negative indices are bad, OK here because of other checks on input indices * Uses unrolled loop specifically for length 4 */ #ifdef __CUDACC__ inline __host__ __device__ #elif defined(__GNUC__) #endif static int getOffsetUnsafe4(int baseOffset, int *shape, int *stride, int *indices) { int offset = baseOffset; if (shape[0] != 1) offset += indices[0] * stride[0]; if (shape[1] != 1) offset += indices[1] * stride[1]; if (shape[2] != 1) offset += indices[2] * stride[2]; if (shape[3] != 1) offset += indices[3] * stride[3]; return offset; } /** * A version of Shape.getOffset without checking on input for negative indices etc * normally negative indices are bad, OK here because of other checks on input indices * Uses unrolled loop specifically for length 6, where indices[2] and indices[3] are zero (always are here) */ #ifdef __CUDACC__ inline __host__ __device__ #elif defined(__GNUC__) #endif static int getOffsetUnsafe6(int baseOffset, int *shape, int *stride, int *indices) { int offset = baseOffset; if (shape[0] != 1) offset += indices[0] * stride[0]; if (shape[1] != 1) offset += indices[1] * stride[1]; if (shape[4] != 1) offset += indices[4] * stride[4]; if (shape[5] != 1) offset += indices[5] * stride[5]; return offset; } }; template<typename T> class Histogram { public: static const bool requiresSpecial = true; #ifdef __CUDACC__ static inline __device__ void execSpecialCuda( T *dx, Nd4jLong *xShapeBuffer, T *result, Nd4jLong *resultShapeBuffer, T *extraParams, int *allocationPointer, T *reductionPointer, UnifiedSharedMemory *manager, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { int numBins = (int) extraParams[0]; T min_val = extraParams[1]; T max_val = extraParams[2]; int tid = blockIdx.x * blockDim.x + threadIdx.x; __shared__ T *bins; __shared__ int length; __shared__ T *reductor; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; bins = (T *) shmem; reductor = ((T *) allocationPointer) + (numBins * blockIdx.x); length = shape::length(xShapeBuffer); } __syncthreads(); T binSize = (max_val - min_val) / (numBins); for (int e = threadIdx.x; e < numBins; e += blockDim.x) { bins[e] = (T) 0.0f; } __syncthreads(); for (int e = tid; e < length; e+= blockDim.x * gridDim.x) { int idx = (int) ((dx[e] - min_val) / binSize); if (idx < 0) idx = 0; else if (idx >= numBins) idx = numBins - 1; nd4j::math::atomics::nd4j_atomicAdd(&bins[idx], (T) 1.0f); } __syncthreads(); // transfer shared memory to reduction memory if (gridDim.x > 1) { unsigned int *tc = (unsigned int *)reductionPointer; __shared__ bool amLast; for (int e = threadIdx.x; e < numBins; e += blockDim.x) { reductor[e] = bins[e]; } __threadfence(); __syncthreads(); if (threadIdx.x == 0) { unsigned int ticket = atomicInc(&tc[16384], gridDim.x); amLast = (ticket == gridDim.x - 1); } __syncthreads(); if (amLast) { tc[16384] = 0; // nullify shared memory for future accumulation 
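// Grid-wide reduction via the "last block" pattern: every block has spilled
// its shared-memory bins to its slice of global scratch (reductor), and
// atomicInc on tc[16384] handed out tickets. The block holding the last
// ticket (amLast) reset that global counter above and now re-reads all
// per-block partials, sums them into its freshly cleared shared bins, and
// writes the final histogram to the output.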
for (int e = threadIdx.x; e < numBins; e += blockDim.x) { bins[e] = (T) 0.0f; } // accumulate reduced bins for (int r = 0; r < gridDim.x; r++) { T *ptrBuf = ((T *)allocationPointer) + (r * numBins); for (int e = threadIdx.x; e < numBins; e += blockDim.x) { bins[e] += ptrBuf[e]; } } __syncthreads(); // write them out to Z for (int e = threadIdx.x; e < numBins; e += blockDim.x) { result[e] = bins[e]; } } } else { // if there's only 1 block - just write away data for (int e = threadIdx.x; e < numBins; e += blockDim.x) { result[e] = bins[e]; } } }; #endif static void execSpecial( T *dx, Nd4jLong *xShapeBuffer, T *result, Nd4jLong *resultShapeBuffer, T *extraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { int length = shape::length(xShapeBuffer); int _threads = 2; int numBins = (int) extraParams[0]; int span = (length / _threads) + 8; // get min over input T min_val = extraParams[1]; T max_val = extraParams[2]; /* #pragma omp parallel for simd num_threads(_threads) if (_threads > 1) reduction(min:min_val) proc_bind(close) for (int x = 0; x < length; x++) { if (min_val > dx[x]) min_val = dx[x]; } // get max over input T max_val = (T) MIN_FLOAT; #pragma omp parallel for simd num_threads(_threads) if (_threads > 1) reduction(max:max_val) proc_bind(close) for (int x = 0; x < length; x++) { if (max_val < dx[x]) max_val = dx[x]; } */ T binSize = (max_val - min_val) / (numBins); #pragma omp parallel num_threads(_threads) if (_threads > 1) proc_bind(close) default(shared) { int tid, start, end; int *bins = new int[numBins]; std::memset(bins, 0, sizeof(int) * numBins); tid = omp_get_thread_num(); start = span * tid; end = span * (tid + 1); if (end > length) end = length; #pragma omp simd for (int x = start; x < end; x++) { int idx = (int) ((dx[x] - min_val) / binSize); if (idx < 0) idx = 0; else if (idx >= numBins) idx = numBins - 1; bins[idx]++; } #pragma omp critical { #pragma omp simd for (int x = 0; x < numBins; x++) { result[x] += bins[x]; } } delete[] bins; } } op_def static T op(T d1, T *params) { return d1; } }; template<typename T> class Col2Im { public: static const bool requiresSpecial = true; #ifdef __CUDACC__ /** * https://github.com/pjreddie/darknet/blob/master/src/col2im_kernels.cu */ static inline __device__ void execSpecialCuda( T *dx, Nd4jLong *xShapeBuffer, T *result, Nd4jLong *resultShapeBuffer, T *extraParams, int *allocationPointer, T *reductionPointer, UnifiedSharedMemory *manager, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { auto inShape = shape::shapeOf(xShapeBuffer); auto inStride = shape::stride(xShapeBuffer); int strideex = inStride[0]; int stridech = inStride[1]; int stridekrow = inStride[2]; int stridekcol = inStride[3]; int striderow = inStride[4]; int stridecol = inStride[5]; int kernelHeight = inShape[2]; int kernelWidth = inShape[3]; // C int strideY = (int)extraParams[0]; int strideX = (int)extraParams[1]; int padHeight = (int)extraParams[2]; int padWidth = (int)extraParams[3]; int imgHeight = (int)extraParams[4]; int imgWidth = (int)extraParams[5]; int dY = (int)extraParams[6]; //Dilation in height/y dimension int dX = (int)extraParams[7]; //Dilation in width/x dimension auto outShape = shape::shapeOf(resultShapeBuffer); auto resultOrder = shape::order(resultShapeBuffer); auto outStride = shape::stride(resultShapeBuffer); int samples = outShape[0]; int depth = outShape[1]; int imgH = outShape[2]; int imgW = outShape[3]; int height_col = inShape[4];//(imgHeight + 2 * padHeight - kernelHeight) / strideX + 1; int width_col = inShape[5];//(imgWidth + 2 * 
padWidth - kernelWidth) / strideY + 1;

            int n = samples * depth * imgHeight * imgWidth;

            /*if (threadIdx.x == 0)
                printf("Kernel h: [%i], w: [%i]; Col h: [%i], w: [%i]; Stride x: [%i], y: [%i]; Height: [%i], Width: [%i], Depth: [%i], N: [%i], Samples: [%i]\n", kernelHeight, kernelWidth, height_col, width_col, strideX, strideY, imgHeight, imgWidth, depth, n, samples);*/

            //Effective kernel size, accounting for dilation
            int kEffectiveW = kernelWidth + (kernelWidth - 1) * (dX - 1);
            int kEffectiveH = kernelHeight + (kernelHeight - 1) * (dY - 1);

            for (int i = (blockDim.x * blockIdx.x) + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
                T val = 0;
                int w_im = i % imgWidth + padWidth;
                int h_im = (i / imgWidth) % imgHeight + padHeight;
                int c_im = i / (imgWidth * imgHeight);

                int num_im = c_im / depth;
                int depth_im = c_im % depth;

                // compute the start and end of the output
                // These are the indexes for dimensions ??? in the 6d col matrix
                int w_col_start = (w_im < kEffectiveW) ? 0 : (w_im - kEffectiveW) / strideX + 1;
                int w_col_end = nd4j::math::nd4j_min<int>(w_im / strideX + 1, width_col);

                int h_col_start = (h_im < kEffectiveH) ? 0 : (h_im - kEffectiveH) / strideY + 1;
                int h_col_end = nd4j::math::nd4j_min<int>(h_im / strideY + 1, height_col);

                //Iterate over col entries in the 6d array... these are added up
                for (int h_col = h_col_start; h_col < h_col_end; h_col += 1) {
                    for (int w_col = w_col_start; w_col < w_col_end; w_col += 1) {
                        int h_k = (h_im - h_col * strideY);
                        int w_k = (w_im - w_col * strideX);

                        if(h_k % dY == 0 && w_k % dX == 0){
                            h_k /= dY;
                            w_k /= dX;

                            int data_col_index = num_im * strideex + depth_im * stridech + h_k * stridekrow + w_k * stridekcol + h_col * striderow + w_col * stridecol;
                            val += dx[data_col_index];
                        }
                    }
                }

                int i_f = 0;
                int i_c = i;
                for (int dim = 3; dim >= 0; dim--) {
                    i_f += (i_c % outShape[dim]) * outStride[dim];
                    i_c = i_c / outShape[dim];
                }

                result[i_f] = val;
            }
        }
#endif

        static void execSpecial(
                T *dx, Nd4jLong *xShapeBuffer,
                T *result, Nd4jLong *resultShapeBuffer,
                T *extraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {

            const Nd4jLong *inShape = shape::shapeOf(xShapeBuffer);
            const Nd4jLong *inStride = shape::stride(xShapeBuffer);
            // The output (image) shape must come from the result buffer; the
            // original read it from xShapeBuffer, which only worked by
            // coincidence because the 6d col array shares dims 0 and 1.
            const Nd4jLong *outShape = shape::shapeOf(resultShapeBuffer);
            const Nd4jLong *outStride = shape::stride(resultShapeBuffer);

            const int kH = inShape[2];
            const int kW = inShape[3];

            const int bS = outShape[0];
            const int iC = outShape[1];

            const int oH = inShape[4];
            const int oW = inShape[5];

            const int sH = (int)extraParams[0];
            const int sW = (int)extraParams[1];
            const int pH = (int)extraParams[2];
            const int pW = (int)extraParams[3];
            const int iH = (int)extraParams[4];
            const int iW = (int)extraParams[5];
            const int dH = (int)extraParams[6];
            const int dW = (int)extraParams[7];

            const int inStride0  = inStride[0];
            const int inStride1  = inStride[1];
            const int inStride2  = inStride[2];
            const int inStride3  = inStride[3];
            const int inStride4  = inStride[4];
            const int inStride5  = inStride[5];
            const int outStride0 = outStride[0];
            const int outStride1 = outStride[1];
            const int outStride2 = outStride[2];
            const int outStride3 = outStride[3];

            const T* out0End = result + outStride1 * iC;
            const int kRowEnd = -pH + kH * dH;
            const int inStepOW = oW * inStride5;
            const int kColEnd = -pW + kW * dW;
            const int inRowEnd = oH * sH;
            const int inColEnd = oW * sW;

            int inRowStart, inColStart, inRow, inCol;
            T *out0, *out1, *out2;

            memset(result, 0, shape::length(resultShapeBuffer) * sizeof(T));

            if (shape::order(xShapeBuffer) == 'c' && shape::order(resultShapeBuffer) == 'c' &&
shape::strideDescendingCAscendingF(xShapeBuffer) && shape::strideDescendingCAscendingF(resultShapeBuffer)) { #pragma omp parallel for schedule(guided) proc_bind(close) private(out0, out1, out2, inRowStart, inColStart, inRow, inCol) for (int b = 0; b < bS; b++) { T *input = dx + (b * inStride0); out0 = result + (b * outStride0); for (int channel = 0; channel < iC; ++channel, out0 += outStride1) { for (int kRow = 0; kRow < kH; ++kRow) { inRowStart = -pH + kRow * dH; for (int kCol = 0; kCol < kW; ++kCol) { inRow = inRowStart; inColStart = -pW + kCol * dW; for (int outRow = 0; outRow < oH; ++outRow, inRow += sH) { if (!is_a_ge_zero_and_a_lt_b(inRow, iH)) { input += inStepOW; } else { inCol = inColStart; out1 = out0 + inRow * outStride2; for (int outCol = 0; outCol < oW; ++outCol, inCol += sW, input += inStride5) { if (is_a_ge_zero_and_a_lt_b(inCol, iW)) { out2 = out1 + inCol * outStride3; *out2 += *input; } } } } } } } } } else { T *in0, *in1, *in2, *in3, *in4; #pragma omp parallel for schedule(guided) proc_bind(close) private(in0, in1, in2, in3, in4, out0, out1, out2, inRowStart, inColStart, inRow, inCol) for (int b = 0; b < bS; b++) { out0 = result + (b * outStride0); in0 = dx + b * inStride0; for (int channel = 0; channel < iC; ++channel, out0+=outStride1, in0+=inStride1) { in1 = in0; for (int kRow = 0; kRow < kH; ++kRow, in1+=inStride2) { in2 = in1; inRowStart = -pH + kRow * dH; for (int kCol = 0; kCol < kW; ++kCol, in2+=inStride3) { in3 = in2; inRow = inRowStart; inColStart = -pW + kCol * dW; for (int outRow = 0; outRow < oH; ++outRow, inRow+=sH, in3+=inStride4) { in4 = in3; if (!is_a_ge_zero_and_a_lt_b(inRow, iH)) { in4 += inStepOW; } else { inCol = inColStart; out1 = out0 + inRow * outStride2; for (int outCol = 0; outCol < oW; ++outCol, inCol+=sW, in4+=inStride5) { if (is_a_ge_zero_and_a_lt_b(inCol, iW)) { out2 = out1 + inCol * outStride3; *out2 += *in4; } } } } } } } } } } op_def static T op(T d1, T *params) { return d1; } /** Calculate buffer offset (like Shape.getOffset) without checking on input for negative indices etc * normally negative indices are bad, OK here because of other checks on input indices * Uses unrolled loop specifically for length 4 */ #ifdef __CUDACC__ inline __host__ __device__ #elif defined(__GNUC__) #endif static int getOffsetUnsafe4(int baseOffset, int *shape, int *stride, int *indices) { int offset = baseOffset; if (shape[0] != 1) offset += indices[0] * stride[0]; if (shape[1] != 1) offset += indices[1] * stride[1]; if (shape[2] != 1) offset += indices[2] * stride[2]; if (shape[3] != 1) offset += indices[3] * stride[3]; return offset; } /** A version of Shape.getOffset without checking on input for negative indices etc * normally negative indices are bad, OK here because of other checks on input indices * Uses unrolled loop specifically for length 6, where indices[2] and indices[3] are zero (always are here) */ #ifdef __CUDACC__ inline __host__ __device__ #elif defined(__GNUC__) #endif static int getOffsetUnsafe6(int baseOffset, int *shape, int *stride, int *indices) { int offset = baseOffset; if (shape[0] != 1) offset += indices[0] * stride[0]; if (shape[1] != 1) offset += indices[1] * stride[1]; if (shape[4] != 1) offset += indices[4] * stride[4]; if (shape[5] != 1) offset += indices[5] * stride[5]; return offset; } }; template<typename T> class Reverse { public: static const bool requiresSpecial = true; #ifdef __CUDACC__ static inline __device__ void execSpecialCuda(T *dx, Nd4jLong *xShapeBuffer, T *result, Nd4jLong *zShapeBuffer, T *extraParams, int 
*allocationPointer, T *reductionPointer, UnifiedSharedMemory *manager, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { __shared__ Nd4jLong xLength; __shared__ int xEWS; __shared__ char xOrder; __shared__ Nd4jLong sLength; __shared__ T *shmem; int tid = threadIdx.x + blockIdx.x * blockDim.x; if (threadIdx.x == 0) { xLength = shape::length(xShapeBuffer); xEWS = shape::elementWiseStride(xShapeBuffer); xOrder = shape::order(xShapeBuffer); sLength = xLength - 1; extern __shared__ unsigned char shrd[]; shmem = (T *) shrd; } __syncthreads(); if (dx == result) { if (xEWS == 1) { for (int e = tid; e < xLength / 2; e += blockDim.x * gridDim.x) { Nd4jLong idx = sLength - e; T tmp = dx[e]; dx[e] = dx[idx]; dx[idx] = tmp; } } else if (xEWS >= 1) { for (int e = tid; e < xLength / 2; e += blockDim.x * gridDim.x) { Nd4jLong idx1 = (sLength - e) * xEWS; Nd4jLong idx2 = e * xEWS; T tmp = dx[idx2]; dx[idx2] = dx[idx1]; dx[idx1] = tmp; } } else { __shared__ int xRank; __shared__ Nd4jLong *xShape; __shared__ Nd4jLong *xStride; if (threadIdx.x == 0) { xRank = shape::rank(xShapeBuffer); xShape = shape::shapeOf(xShapeBuffer); xStride = shape::stride(xShapeBuffer); } __syncthreads(); Nd4jLong xCoord[MAX_RANK]; Nd4jLong zCoord[MAX_RANK]; for (int e = tid; e < xLength / 2; e += blockDim.x * gridDim.x) { if (xOrder == 'c') { shape::ind2subC(xRank, xShape, e, xCoord); shape::ind2subC(xRank, xShape, sLength - e, zCoord); } else { shape::ind2sub(xRank, xShape, e, xCoord); shape::ind2sub(xRank, xShape, sLength - e, zCoord); } auto xOffset = shape::getOffset(0, xShape, xStride, xCoord, xRank); auto zOffset = shape::getOffset(0, xShape, xStride, zCoord, xRank); result[zOffset] = dx[xOffset]; } } } else { __shared__ int zEWS; __shared__ char zOrder; if (threadIdx.x == 0) { zEWS = shape::elementWiseStride(zShapeBuffer); zOrder = shape::order(zShapeBuffer); } __syncthreads(); if (xEWS == 1 && zEWS == 1 && xOrder == zOrder) { // loop for whole array for (int e = tid; e < xLength; e += blockDim.x * gridDim.x) { result[sLength - e] = dx[e]; } } else if (xEWS >= 1 && zEWS >= 1 && xOrder == zOrder) { for (int e = tid; e < xLength; e += blockDim.x * gridDim.x) { result[(sLength - e) * zEWS] = dx[e * xEWS]; } } else { __shared__ int xRank; __shared__ Nd4jLong *xShape; __shared__ Nd4jLong *xStride; __shared__ int zRank; __shared__ Nd4jLong *zShape; __shared__ Nd4jLong *zStride; if (threadIdx.x == 0) { xRank = shape::rank(xShapeBuffer); xShape = shape::shapeOf(xShapeBuffer); xStride = shape::stride(xShapeBuffer); zRank = shape::rank(zShapeBuffer); zShape = shape::shapeOf(zShapeBuffer); zStride = shape::stride(zShapeBuffer); } __syncthreads(); Nd4jLong xCoord[MAX_RANK]; Nd4jLong zCoord[MAX_RANK]; for (int e = tid; e < xLength; e += blockDim.x * gridDim.x) { if (xOrder == 'c') { shape::ind2subC(xRank, xShape, e, xCoord); shape::ind2subC(xRank, xShape, sLength - e, zCoord); } else { shape::ind2sub(xRank, xShape, e, xCoord); shape::ind2sub(xRank, xShape, sLength - e, zCoord); } auto xOffset = shape::getOffset(0, xShape, xStride, xCoord, xRank); auto zOffset = shape::getOffset(0, xShape, xStride, zCoord, xRank); result[zOffset] = dx[xOffset]; } } } } #endif static void execSpecial(T *dx, Nd4jLong *xShapeBuffer, T *result, Nd4jLong *zShapeBuffer, T *extraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { Nd4jLong xLength = shape::length(xShapeBuffer); int xEWS = shape::elementWiseStride(xShapeBuffer); char xOrder = shape::order(xShapeBuffer); Nd4jLong sLength = xLength - 1; // two step phase here if (dx == result) { if (xEWS == 1) { 
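// In-place reverse: element e is swapped with its mirror (sLength - e), and
// only the first half (xLength / 2) is visited so each pair is exchanged
// exactly once; the middle element of an odd-length array stays in place.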
#pragma omp parallel for schedule(guided) for (Nd4jLong e = 0; e < xLength / 2; e++) { Nd4jLong idx = sLength - e; T tmp = dx[e]; dx[e] = dx[idx]; dx[idx] = tmp; } } else if (xEWS > 1) { #pragma omp parallel for schedule(guided) for (Nd4jLong e = 0; e < xLength / 2; e++) { Nd4jLong idx1 = (sLength - e) * xEWS; Nd4jLong idx2 = e * xEWS; T tmp = dx[idx2]; dx[idx2] = dx[idx1]; dx[idx1] = tmp; } } else { int xRank = shape::rank(xShapeBuffer); auto xShape = shape::shapeOf(xShapeBuffer); auto xStride = shape::stride(xShapeBuffer); Nd4jLong xCoord[MAX_RANK]; Nd4jLong zCoord[MAX_RANK]; #pragma omp parallel for private(xCoord, zCoord) schedule(guided) for (Nd4jLong e = 0; e < xLength / 2; e++) { if (xOrder == 'c') { shape::ind2subC(xRank, xShape, e, xCoord); shape::ind2subC(xRank, xShape, sLength - e, zCoord); } else { shape::ind2sub(xRank, xShape, e, xCoord); shape::ind2sub(xRank, xShape, sLength - e, zCoord); } auto xOffset = shape::getOffset(0, xShape, xStride, xCoord, xRank); auto zOffset = shape::getOffset(0, xShape, xStride, zCoord, xRank); result[zOffset] = dx[xOffset]; } } } else { // single step phase here auto zEWS = shape::elementWiseStride(zShapeBuffer); auto zOrder = shape::order(zShapeBuffer); if (xEWS == 1 && zEWS == 1 && xOrder == zOrder) { #pragma omp parallel for schedule(guided) for (Nd4jLong e = 0; e < xLength; e++) { result[sLength - e] = dx[e]; } } else if (xEWS >= 1 && zEWS >= 1 && xOrder == zOrder) { #pragma omp parallel for schedule(guided) for (Nd4jLong e = 0; e < xLength; e++) { result[(sLength - e) * zEWS] = dx[e * xEWS]; } } else { int xRank = shape::rank(xShapeBuffer); auto xShape = shape::shapeOf(xShapeBuffer); auto xStride = shape::stride(xShapeBuffer); int zRank = shape::rank(zShapeBuffer); auto zShape = shape::shapeOf(zShapeBuffer); auto zStride = shape::stride(zShapeBuffer); Nd4jLong xCoord[MAX_RANK]; Nd4jLong zCoord[MAX_RANK]; #pragma omp parallel for private(xCoord, zCoord) schedule(guided) for (Nd4jLong e = 0; e < xLength; e++) { if (xOrder == 'c') shape::ind2subC(xRank, xShape, e, xCoord); else shape::ind2sub(xRank, xShape, e, xCoord); if (zOrder == 'c') shape::ind2subC(zRank, zShape, (sLength - e), zCoord); else shape::ind2sub(zRank, zShape, (sLength - e), zCoord); auto xOffset = shape::getOffset(0, xShape, xStride, xCoord, xRank); auto zOffset = shape::getOffset(0, zShape, zStride, zCoord, zRank); result[zOffset] = dx[xOffset]; } } } } op_def static T op(T d1, T *params) { return d1; } }; template<typename T> class SoftMax { public: static const bool requiresSpecial = true; #ifdef __CUDACC__ /** * */ static inline __device__ void execSpecialCuda( T *dx, Nd4jLong *xShapeBuffer, T *result, Nd4jLong *resultShapeBuffer, T *extraParams, int *allocationPointer, T *reductionPointer, UnifiedSharedMemory *manager, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { auto shape = shape::shapeOf(xShapeBuffer); __shared__ T maxResult; __shared__ Nd4jLong *maxResultShapeBuffer; auto length = shape::length(xShapeBuffer); auto stride = shape::stride(xShapeBuffer); //compute the row wise maxes __shared__ Nd4jLong maxShape[2]; // it's always 2d here __shared__ Nd4jLong tempBuffer[8]; if (threadIdx.x == 0) { maxResult = (T) 0.0; maxShape[0] = shape[0]; maxShape[1] = 1; maxResultShapeBuffer = shape::shapeBuffer(2, maxShape, tempBuffer); } __syncthreads(); functions::reduce::ReduceFunction<T>::template execScalarCuda<simdOps::Max<T>>(dx, xShapeBuffer, extraParams, &maxResult, maxResultShapeBuffer, reductionPointer, manager, nullptr); __syncthreads(); //subtract max of each row 
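// Numerically stable softmax: softmax(x + c) == softmax(x) for any constant
// c, so shifting by the row max keeps every exp() argument <= 0 and avoids
// overflow; e.g. for x = [1000, 1001] this evaluates exp([-1, 0]) / (e^-1 + 1)
// instead of exp([1000, 1001]).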
functions::scalar::ScalarTransform<T>::template transformCuda<simdOps::Subtract<T>>(maxResult, dx, xShapeBuffer, extraParams, result, resultShapeBuffer, allocationPointer, manager); __syncthreads(); //after subtracting the row wise maxes take the exp functions::transform::Transform<T>::template transformCuda<simdOps::Exp<T>>(result, resultShapeBuffer, extraParams, result, resultShapeBuffer, allocationPointer, reductionPointer, manager, tadShapeInfo, tadOffsets); __syncthreads(); //take the sum for the exponential functions::reduce::ReduceFunction<T>::template execScalarCuda<simdOps::Sum<T>>(result, resultShapeBuffer, extraParams, &maxResult, maxResultShapeBuffer, reductionPointer, manager, nullptr); __syncthreads(); //divide by the sum functions::scalar::ScalarTransform<T>::template transformCuda<simdOps::Divide<T>>(maxResult, result, resultShapeBuffer, extraParams, result, resultShapeBuffer, allocationPointer, manager); } #endif static void execSpecial( T *dx, Nd4jLong *xShapeBuffer, T *result, Nd4jLong *resultShapeBuffer, T *extraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { if (shape::isMatrix(xShapeBuffer)) { auto shape = shape::shapeOf(xShapeBuffer); //iterate along rows int dimension[1] = { 0 }; int maxDimension[1] = { 1 }; //compute the row wise maxes std::vector<T> maxResult(shape[0]); for (int i = 0; i < shape[0]; i++) maxResult[i] = 0.0; Nd4jLong maxShape[2] = { shape[0], 1 }; auto maxResultShapeBuffer = shape::shapeBuffer(2, maxShape); functions::reduce::ReduceFunction<T>::template exec<simdOps::Max<T>>(dx, xShapeBuffer, extraParams, maxResult.data(), maxResultShapeBuffer, maxDimension, 1, nullptr, nullptr); //subtract max of each row functions::broadcast::Broadcast<T>::template exec<simdOps::Subtract<T>>(dx, xShapeBuffer, maxResult.data(), maxResultShapeBuffer, result, resultShapeBuffer, dimension, 1, nullptr, nullptr, nullptr, nullptr); //after subtracting the row wise maxes take the exp functions::transform::Transform<T>::template exec<simdOps::Exp<T>>(result, resultShapeBuffer, result, resultShapeBuffer, extraParams, tadShapeInfo, tadOffsets); //take the sum for the exponential functions::reduce::ReduceFunction<T>::template exec<simdOps::Sum<T>>(result, resultShapeBuffer, extraParams, maxResult.data(), maxResultShapeBuffer, maxDimension, 1, nullptr, nullptr); //divide by the sum functions::broadcast::Broadcast<T>::template exec<simdOps::Divide<T>>(result, resultShapeBuffer, maxResult.data(), maxResultShapeBuffer, result, resultShapeBuffer, dimension, 1, nullptr, nullptr, nullptr, nullptr); delete[] maxResultShapeBuffer; } else if (shape::isVector(xShapeBuffer)) { T max = -FLOAT_MAX_VALUE; T sum = 0; int elementWiseStride = shape::elementWiseStride(xShapeBuffer); int resultElementWiseStride = shape::elementWiseStride(resultShapeBuffer); int length = shape::length(xShapeBuffer); if (elementWiseStride >= 1 && resultElementWiseStride >= 1) { if (elementWiseStride == 1 && resultElementWiseStride == 1) { #pragma omp simd reduction(maxT:max) for (int i = 0; i < length; i++) { max = nd4j::math::nd4j_max<T>(max, dx[i]); } #pragma omp parallel for simd reduction(sumT:sum) for (int i = 0; i < length; i++) { result[i] = nd4j::math::nd4j_exp<T>(dx[i] - max); sum += result[i]; } #pragma omp simd for (int i = 0; i < length; i++) { result[i] /= sum; } } else { #pragma omp simd reduction(maxT:max) for (int i = 0; i < length; i++) { max = nd4j::math::nd4j_max<T>(max, dx[i * elementWiseStride]); } #pragma omp parallel for simd reduction(sumT:sum) for (int i = 0; i < length; i++) { T 
r = nd4j::math::nd4j_exp<T>(dx[i * elementWiseStride] - max); result[i * resultElementWiseStride] = r; sum += r; } #pragma omp simd for (int i = 0; i < length; i++) { result[i * resultElementWiseStride] /= sum; } } } } } op_def static T op(T d1, T *params) { return nd4j::math::softplus<T>(d1); } }; template<typename T> class LogSoftMax { public: static const bool requiresSpecial = true; #ifdef __CUDACC__ /** * */ static inline __device__ void execSpecialCuda( T *dx, Nd4jLong *xShapeBuffer, T *result, Nd4jLong *resultShapeBuffer, T *extraParams, int *allocationPointer, T *reductionPointer, UnifiedSharedMemory *manager, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { auto shape = shape::shapeOf(xShapeBuffer); auto stride = shape::stride(xShapeBuffer); //iterate along rows __shared__ T maxResult; __shared__ Nd4jLong *maxResultShapeBuffer; if (threadIdx.x == 0) { maxResult = (T) 0.0; } __syncthreads(); //compute the row wise maxes Nd4jLong maxShape[2] = { shape[0], 1 }; __shared__ Nd4jLong tempBuffer[8]; if (threadIdx.x == 0) maxResultShapeBuffer = shape::shapeBuffer(2, maxShape, tempBuffer); __syncthreads(); functions::reduce::ReduceFunction<T>::template execScalarCuda<simdOps::Max<T>>(dx, xShapeBuffer, extraParams, &maxResult, maxResultShapeBuffer, reductionPointer, manager, nullptr); __syncthreads(); //subtract max of each row functions::scalar::ScalarTransform<T>::template transformCuda<simdOps::Subtract<T>>(maxResult, dx, xShapeBuffer, extraParams, result, resultShapeBuffer, allocationPointer, manager); __syncthreads(); //after subtracting the row wise maxes take the exp functions::transform::Transform<T>::template transformCuda<simdOps::Exp<T>>(result, resultShapeBuffer, extraParams, result, resultShapeBuffer, allocationPointer, reductionPointer, manager, tadShapeInfo, tadOffsets); __syncthreads(); //take the sum for the exponential functions::reduce::ReduceFunction<T>::template execScalarCuda<simdOps::Sum<T>>(result, resultShapeBuffer, extraParams, &maxResult, maxResultShapeBuffer, reductionPointer, manager, nullptr); __syncthreads(); //divide by the sum functions::scalar::ScalarTransform<T>::template transformCuda<simdOps::Divide<T>>(maxResult, result, resultShapeBuffer, extraParams, result, resultShapeBuffer, allocationPointer, manager); __syncthreads(); functions::transform::Transform<T>::template transformCuda<simdOps::Log<T>>(result, resultShapeBuffer, extraParams, result, resultShapeBuffer, allocationPointer, reductionPointer, manager, tadShapeInfo, tadOffsets); } #endif static void execSpecial( T *dx, Nd4jLong *xShapeBuffer, T *result, Nd4jLong *resultShapeBuffer, T *extraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { if (shape::isMatrix(xShapeBuffer, 2)) { auto shape = shape::shapeOf(xShapeBuffer); //iterate along rows int dimension[1] = { 0 }; int maxDimension[1] = { 1 }; //compute the row wise maxes std::vector <T> maxResult(shape[0]); #pragma omp simd for (int i = 0; i < shape[0]; i++) maxResult[i] = 0.0; Nd4jLong maxShape[2] = { shape[0], 1 }; auto maxResultShapeBuffer = shape::shapeBuffer(2, maxShape); functions::reduce::ReduceFunction<T>::template exec<simdOps::Max<T>>(dx, xShapeBuffer, extraParams, maxResult.data(), maxResultShapeBuffer, maxDimension, 1, nullptr, nullptr); //subtract max of each row functions::broadcast::Broadcast<T>::template exec<simdOps::Subtract<T>>(dx, xShapeBuffer, maxResult.data(), maxResultShapeBuffer, result, resultShapeBuffer, dimension, 1, nullptr, nullptr, nullptr, nullptr); //after subtracting the row wise maxes take the exp 
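// log_softmax(x) = (x - max) - log(sum_j exp(x_j - max)); the chain below
// realises this as exp() over the max-shifted values, a row-wise sum, a
// divide, and a final log.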
functions::transform::Transform<T>::template exec<simdOps::Exp<T>>(result, resultShapeBuffer, result, resultShapeBuffer, extraParams, tadShapeInfo, tadOffsets); //take the sum for the exponential functions::reduce::ReduceFunction<T>::template exec<simdOps::Sum<T>>(result, resultShapeBuffer, extraParams, maxResult.data(), maxResultShapeBuffer, maxDimension, 1, nullptr, nullptr); //divide by the sum functions::broadcast::Broadcast<T>::template exec<simdOps::Divide<T>>(result, resultShapeBuffer, maxResult.data(), maxResultShapeBuffer, result, resultShapeBuffer, dimension, 1, nullptr, nullptr, nullptr, nullptr); functions::transform::Transform<T>::template exec<simdOps::Log<T>>(result, resultShapeBuffer, result, resultShapeBuffer, extraParams, tadShapeInfo, tadOffsets); delete[] maxResultShapeBuffer; } else if (shape::isVector(xShapeBuffer, 2)) { T max = -FLOAT_MAX_VALUE; T sum = 0; int elementWiseStride = shape::elementWiseStride(xShapeBuffer); int length = shape::length(xShapeBuffer); if (elementWiseStride == 1) { #pragma omp simd reduction(maxT:max) for (int i = 0; i < length; i++) { max = nd4j::math::nd4j_max<T>(max, result[i]); } #pragma omp simd reduction(sumT:sum) for (int i = 0; i < length; i++) { result[i] = nd4j::math::nd4j_exp<T>(dx[i] - max); sum += result[i]; } #pragma omp simd for (int i = 0; i < length; i++) { result[i] /= sum; result[i] = nd4j::math::nd4j_log<T>(result[i]); } } else if (elementWiseStride > 1) { #pragma omp simd reduction(maxT:max) for (int i = 0; i < length; i++) { max = nd4j::math::nd4j_max<T>(max, result[i * elementWiseStride]); } #pragma omp simd reduction(sumT:sum) for (int i = 0; i < length; i++) { result[i * elementWiseStride] = nd4j::math::nd4j_exp<T>(dx[i * elementWiseStride] - max); sum += result[i * elementWiseStride]; } #pragma omp simd for (int i = 0; i < length; i++) { result[i * elementWiseStride] /= sum; result[i * elementWiseStride] = nd4j::math::nd4j_log<T>(result[i * elementWiseStride]); } } } } op_def static T op(T d1, T *params) { return nd4j::math::softplus<T>(d1); } }; /** * softmax(x) */ template<typename T> class SoftMaxDerivative { public: static const bool requiresSpecial = true; #ifdef __CUDACC__ /** * */ static inline __device__ void execSpecialCuda( T *dx, Nd4jLong *xShapeBuffer, T *result, Nd4jLong *resultShapeBuffer, T *extraParams, int *allocationPointer, T *reductionPointer, UnifiedSharedMemory *manager, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { auto shape = shape::shapeOf(xShapeBuffer); __shared__ T maxResult; __shared__ Nd4jLong *maxResultShapeBuffer; __shared__ Nd4jLong resultEWS; auto length = shape::length(xShapeBuffer); if (threadIdx.x == 0) { resultEWS = shape::elementWiseStride(resultShapeBuffer); maxResult = (T) 0.0; } __syncthreads(); auto tride = shape::stride(xShapeBuffer); Nd4jLong maxShape[2] = { shape[0], 1 }; __shared__ Nd4jLong tempBuffer[8]; if (threadIdx.x == 0) maxResultShapeBuffer = shape::shapeBuffer(2, maxShape, tempBuffer); __syncthreads(); functions::reduce::ReduceFunction<T>::template execScalarCuda<simdOps::Max<T>>(dx, xShapeBuffer, extraParams, &maxResult, maxResultShapeBuffer, reductionPointer, manager, nullptr); __syncthreads(); //subtract max of each row functions::scalar::ScalarTransform<T>::template transformCuda<simdOps::Subtract<T>>(maxResult, dx, xShapeBuffer, extraParams, result, resultShapeBuffer, allocationPointer, manager); __syncthreads(); //after subtracting the row wise maxes take the exp functions::transform::Transform<T>::template transformCuda<simdOps::Exp<T>>(result, 
resultShapeBuffer, extraParams, result, resultShapeBuffer, allocationPointer, reductionPointer, manager, tadShapeInfo, tadOffsets); __syncthreads(); //take the sum for the exponential functions::reduce::ReduceFunction<T>::template execScalarCuda<simdOps::Sum<T>>(result, resultShapeBuffer, extraParams, &maxResult, maxResultShapeBuffer, reductionPointer, manager, nullptr); __syncthreads(); //divide by the sum functions::scalar::ScalarTransform<T>::template transformCuda<simdOps::Divide<T>>(maxResult, result, resultShapeBuffer, extraParams, result, resultShapeBuffer, allocationPointer, manager); __syncthreads(); if (resultEWS >= 1) { for (int i = threadIdx.x; i < length; i += blockDim.x) { result[i * resultEWS] = result[i * resultEWS] * ((T) 1.0 - result[i * resultEWS]); } } else { printf("Non element wise stride not supported right now\n"); } } #endif static void execSpecial( T *dx, Nd4jLong *xShapeBuffer, T *result, Nd4jLong *resultShapeBuffer, T *extraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { if (shape::isMatrix(xShapeBuffer, 2)) { auto shape = shape::shapeOf(xShapeBuffer); auto resultEleStide = shape::elementWiseStride(resultShapeBuffer); //iterate along rows int dimension[1] = { 0 }; int maxDimension[1] = { 1 }; int len = shape::length(xShapeBuffer); //compute the row wise maxes std::vector <T> maxResult(shape[0]); #pragma omp simd for (int i = 0; i < shape[0]; i++) maxResult[i] = 0.0; Nd4jLong maxShape[2] = { shape[0], 1 }; auto maxResultShapeBuffer = shape::shapeBuffer(2, maxShape); functions::reduce::ReduceFunction<T>::template exec<simdOps::Max<T>>(dx, xShapeBuffer, extraParams, maxResult.data(), maxResultShapeBuffer, maxDimension, 1, nullptr, nullptr); //subtract max of each row functions::broadcast::Broadcast<T>::template exec<simdOps::Subtract<T>>(result, resultShapeBuffer, maxResult.data(), maxResultShapeBuffer, result, resultShapeBuffer, dimension, 1, nullptr, nullptr, nullptr, nullptr); //after subtracting the row wise maxes take the exp functions::transform::Transform<T>::template exec<simdOps::Exp<T>>(result, resultShapeBuffer, result, resultShapeBuffer, extraParams, tadShapeInfo, tadOffsets); //take the sum for the exponential functions::reduce::ReduceFunction<T>::template exec<simdOps::Sum<T>>(result, resultShapeBuffer, extraParams, maxResult.data(), maxResultShapeBuffer, maxDimension, 1, nullptr, nullptr); //divide by the sum functions::broadcast::Broadcast<T>::template exec<simdOps::Divide<T>>(result, resultShapeBuffer, maxResult.data(), maxResultShapeBuffer, result, resultShapeBuffer, dimension, 1, nullptr, nullptr, nullptr, nullptr); if (resultEleStide >= 1) { if (resultEleStide == 1) { #pragma omp simd for (int i = 0; i < len; i++) { result[i] = result[i] * ((T) 1.0f - result[i]); } } else { #pragma omp simd for (int i = 0; i < len; i++) { result[i * resultEleStide] = result[i * resultEleStide] * ((T) 1.0f - result[i * resultEleStide]); } } } else { auto zShape = shape::shapeOf(resultShapeBuffer); auto zStride = shape::stride(resultShapeBuffer); int zRank = shape::rank(resultShapeBuffer); Nd4jLong zCoord[MAX_RANK]; for (int i = 0; i < len; i++) { shape::ind2subC(zRank,zShape, i, zCoord); Nd4jLong zOffset = shape::getOffset(0, zShape, zStride, zCoord, zRank); result[zOffset] = result[zOffset] * ((T) 1.0f - result[zOffset]); } } delete[] maxResultShapeBuffer; } else if (shape::isVector(xShapeBuffer, 2)) { T max = -FLOAT_MAX_VALUE; T sum = 0; auto elementWiseStride = shape::elementWiseStride(xShapeBuffer); auto length = shape::length(xShapeBuffer); if 
(elementWiseStride == 1) { #pragma omp simd reduction(maxT:max) for (int i = 0; i < length; i++) { max = nd4j::math::nd4j_max<T>(max, result[i]); } #pragma omp simd reduction(sumT:sum) for (int i = 0; i < length; i++) { result[i] -= max; result[i] = nd4j::math::nd4j_exp<T>(result[i]); sum += result[i]; } #pragma omp simd for (int i = 0; i < length; i++) { result[i] /= sum; } #pragma omp simd for (int i = 0; i < length; i++) { result[i] = result[i] * ((T) 1.0f - result[i]); } } else if (elementWiseStride >= 1) { #pragma omp simd reduction(maxT:max) for (int i = 0; i < length; i++) { max = nd4j::math::nd4j_max<T>(max, result[i * elementWiseStride]); } #pragma omp simd reduction(sumT:sum) for (int i = 0; i < length; i++) { result[i * elementWiseStride] -= max; result[i * elementWiseStride] = nd4j::math::nd4j_exp<T>(result[i * elementWiseStride]); sum += result[i * elementWiseStride]; } #pragma omp simd for (int i = 0; i < length; i++) { result[i * elementWiseStride] /= sum; } #pragma omp simd for (int i = 0; i < length; i++) { result[i * elementWiseStride] = result[i * elementWiseStride] * ((T) 1.0f - result[i * elementWiseStride]); } } else { printf("non-ews access on row not implemented yet"); } } } op_def static T op(T d1, T *params) { return nd4j::math::softplus<T>(d1); } }; template<typename T> class IsMax { public: static const bool requiresSpecial = true; #ifdef __CUDACC__ static inline __device__ void doAllCuda( T *dx, Nd4jLong *xShapeBuffer, T *result, Nd4jLong *resultShapeBuffer, T *extraParams, int *allocationPointer, T *reductionPointer, UnifiedSharedMemory *manager) { // this code is safe to delete, it's never used /* __shared__ int maxIdx; __shared__ int length; if (threadIdx.x == 0) { length = shape::length(resultShapeBuffer); } __syncthreads(); functions::indexreduce::IndexReduce<T>::template transform<simdOps::IndexMax<T>>( dx, xShapeBuffer, extraParams, result, resultShapeBuffer, nullptr, 1, 1, allocationPointer, reductionPointer, manager, nullptr, nullptr); __syncthreads(); if (threadIdx.x == 0) maxIdx = (int)result[0]; __syncthreads(); for (int i = threadIdx.x; i < length; i += blockDim.x) result[i] = 0; __syncthreads(); if (threadIdx.x == 0) { result[maxIdx] = 1.0; } */ } #endif #ifdef __CUDACC__ inline __host__ #elif defined(__GNUC__) #endif static void doAll( T *dx, Nd4jLong *xShapeBuffer, T *result, Nd4jLong *resultShapeBuffer, T *extraParams) { auto length = shape::length(xShapeBuffer); auto eleStride = shape::elementWiseStride(xShapeBuffer); auto resultEleStride = shape::elementWiseStride(resultShapeBuffer); auto xOrder = shape::order(xShapeBuffer); auto resultOrder = shape::order(resultShapeBuffer); /* int tadsPerThread = tads / TAD_THRESHOLD; int num_threads = nd4j::math::nd4j_max<int>(1, tadsPerThread); num_threads = nd4j::math::nd4j_min<int>(num_threads, omp_get_max_threads()); */ if (xOrder == resultOrder && xOrder == 'c') { if (eleStride == 1 && resultEleStride == 1) { if (length < ELEMENT_THRESHOLD) { int maxIdx = 0; T currMax = dx[0]; //#pragma omp simd reduction (max:maxIdx,currMax) for (int i = 0; i < length; i++) { if (currMax < dx[i]) { currMax = dx[i]; maxIdx = i; } result[i] = 0.0; } result[maxIdx] = 1.0; } else { int maxIdx = 0; T currMax = dx[0]; #pragma omp parallel proc_bind(AFFINITY) { int maxIdxLocal = maxIdx; T currMaxLocal = currMax; //#pragma omp simd reduction(max:maxIdxLocal,currMaxLocal) for (int i = 0; i < length; i++) { if (currMaxLocal < dx[i]) { currMaxLocal = dx[i]; maxIdxLocal = i; } result[i] = 0.0; } #pragma omp critical { if 
(currMax < currMaxLocal) { currMax = currMaxLocal; maxIdx = maxIdxLocal; } } } result[maxIdx] = 1.0; } } else { if (length < ELEMENT_THRESHOLD) { int maxIdx = 0; T currMax = dx[0]; //#pragma omp simd reduction(max:maxIdx,currMax) for (int i = 0; i < length; i++) { result[i * resultEleStride] = 0.0; if (currMax < dx[i * eleStride]) { currMax = dx[i * eleStride]; maxIdx = i; } } result[maxIdx * resultEleStride] = 1.0; } else { int maxIdx = 0; T currMax = dx[0]; #pragma omp parallel proc_bind(AFFINITY) default(shared) { int maxIdxLocal = maxIdx; T currMaxLocal = currMax; //#pragma omp simd reduction(max:maxIdxLocal,currMaxLocal) for (int i = 0; i < length; i++) { result[i * resultEleStride] = 0.0; if (currMaxLocal < dx[i * eleStride]) { currMaxLocal = dx[i * eleStride]; maxIdxLocal = i; } } #pragma omp critical { if (currMax < currMaxLocal) { currMax = currMaxLocal; maxIdx = maxIdxLocal; } } } result[maxIdx * resultEleStride] = 1.0; } } } else { Nd4jLong shapeIter[MAX_RANK]; Nd4jLong coord[MAX_RANK]; int dim; Nd4jLong xStridesIter[MAX_RANK]; Nd4jLong resultStridesIter[MAX_RANK]; auto xShape = shape::shapeOf(xShapeBuffer); auto xStride = shape::stride(xShapeBuffer); auto resultStride = shape::stride(resultShapeBuffer); int rank = shape::rank(xShapeBuffer); T *originalResult = result; if (PrepareTwoRawArrayIter<T>(rank, xShape, dx, xStride, result, resultStride, &rank, shapeIter, &dx, xStridesIter, &result, resultStridesIter) >= 0) { T value = dx[0]; int idx = 0; int maxIdx = 0; ND4J_RAW_ITER_START(dim, rank, coord, shapeIter); { if (dx[0] > value) { value = dx[0]; maxIdx = idx; } idx++; result[0] = 0.0; } ND4J_RAW_ITER_TWO_NEXT( dim, rank, coord, shapeIter, dx, xStridesIter, result, resultStridesIter); //pointer to where max value would be if (shape::order(resultShapeBuffer) == 'c' || (shape::order(resultShapeBuffer) == 'f' && maxIdx * shape::stride(resultShapeBuffer)[shape::rank(resultShapeBuffer) - 1] >= shape::length(resultShapeBuffer))) originalResult[maxIdx] = 1.0; else originalResult[maxIdx * shape::stride(resultShapeBuffer)[shape::rank(resultShapeBuffer) - 1]] = 1.0; } } } public: #ifdef __CUDACC__ /** * */ static inline __device__ void execSpecialCuda( T *dx, Nd4jLong *xShapeBuffer, T *result, Nd4jLong *resultShapeBuffer, T *extraParams, int *allocationPointer, T *reductionPointer, UnifiedSharedMemory *manager, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { // FIXME: MAX_DIMENSION is lower then FP16 frame if (extraParams == nullptr || (int) extraParams[0] == MAX_DIMENSION) { doAllCuda(dx, xShapeBuffer, result, resultShapeBuffer, extraParams, allocationPointer, reductionPointer, manager); } } #endif static void execSpecial( T *dx, Nd4jLong *xShapeBuffer, T *result, Nd4jLong *resultShapeBuffer, T *extraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { //FIXME: this op should be moved to CustomOps if (extraParams == nullptr || (int)extraParams[0] == 0 || ((int)extraParams[0] == 1 && (int)extraParams[1] == MAX_DIMENSION)) { doAll(dx, xShapeBuffer, result, resultShapeBuffer, extraParams); } else if (shape::isVector(xShapeBuffer)) { int dimensionLength = (int)extraParams[0]; int *dimension = new int[dimensionLength]; int length = shape::length(xShapeBuffer); for (int i = 0; i < dimensionLength; i++) { dimension[i] = (int)extraParams[i + 1]; } if (shape::shapeOf(xShapeBuffer)[dimension[0]] == 1) { for (int i = 0; i < length; i++) { result[i] = 1.0; } } else { int eleStride = shape::elementWiseStride(xShapeBuffer); if (eleStride == 1) { int maxIdx = 0; T currMax = dx[0]; if (length < 
ELEMENT_THRESHOLD) { //#pragma omp simd reduction(max:maxIdx,currMax) for (int i = 0; i < length; i++) { if (currMax < dx[i]) { currMax = dx[i]; maxIdx = i; } result[i] = 0.0; } } else { #pragma omp parallel proc_bind(AFFINITY) default(shared) { int maxIdxLocal = maxIdx; T currMaxLocal = currMax; //#pragma omp simd reduction(max:maxIdxLocal,currMaxLocal) for (int i = 0; i < length; i++) { if (currMaxLocal < dx[i]) { currMaxLocal = dx[i]; maxIdxLocal = i; } result[i] = 0.0; } #pragma omp critical { if (currMax < currMaxLocal) { currMax = currMaxLocal; maxIdx = maxIdxLocal; } } } } result[maxIdx] = 1.0; } else { int maxIdx = 0; T currMax = dx[0]; if (length < ELEMENT_THRESHOLD) { //#pragma omp parallel for reduction(max:maxIdx,currMax) proc_bind(AFFINITY) for (int i = 0; i < length; i++) { if (currMax < dx[i * eleStride]) { currMax = dx[i * eleStride]; maxIdx = i; } result[i] = 0.0; } } else { #pragma omp parallel proc_bind(AFFINITY) default(shared) { int maxIdxLocal = maxIdx; T currMaxLocal = currMax; //#pragma omp parallel for reduction(max:maxIdx,currMax) proc_bind(AFFINITY) for (int i = 0; i < length; i++) { if (currMaxLocal < dx[i * eleStride]) { currMaxLocal = dx[i * eleStride]; maxIdxLocal = i; } result[i] = 0.0; } #pragma omp critical { if (currMax < currMaxLocal) { currMax = currMaxLocal; maxIdx = maxIdxLocal; } } } } result[maxIdx] = 1.0; } } } else { int dimensionLength = (int) extraParams[0]; int *dimension = new int[dimensionLength]; #pragma omp simd for (int i = 0; i < dimensionLength; i++) { dimension[i] = (int) extraParams[i + 1]; } //decompose in to several sub tads after //moving all dimensions (in sorted order) //to the back. //permuted version of the x shape info for setting up the tad problem auto tadShapeShapeInfo = tadShapeInfo; shape::TAD tad (xShapeBuffer, dimension, dimensionLength); if(tadShapeInfo==nullptr) { tad.createTadOnlyShapeInfo(); tad.createOffsets(); tadShapeShapeInfo = tad.tadOnlyShapeInfo; tadOffsets = tad.tadOffsets; } auto tadLength = shape::tadLength(xShapeBuffer, dimension, dimensionLength); auto tads = shape::length(xShapeBuffer) / tadLength; int tadsPerThread = tads / TAD_THRESHOLD; int num_threads = nd4j::math::nd4j_max<int>(1, tadsPerThread); num_threads = nd4j::math::nd4j_min<int>(num_threads, omp_get_max_threads()); auto tadEWS = shape::elementWiseStride(tadShapeShapeInfo); auto zEWS = tadEWS; int span = (tads / num_threads) + 8; #pragma omp parallel num_threads(num_threads) if (num_threads>1) proc_bind(AFFINITY) { int tid = omp_get_thread_num(); int start = span * tid; int end = span * (tid + 1); if (end > tads) end = tads; for (int r = start; r < end; r++) { if (tadEWS > 0 && zEWS > 0 && dimensionLength == 1) { T *rX = dx + tadOffsets[r]; T *rZ = result + tadOffsets[r]; T maxValue = rX[0]; int maxIdx = 0; if (tadEWS == 1 && zEWS == 1) { //#pragma omp simd reduction(max:maxValue,maxIdx) for (int i = 0; i < tadLength; i++) { if (rX[i] > maxValue) { maxIdx = i; maxValue = rX[i]; } } #pragma omp simd for (int i = 0; i < tadLength; i++) { rZ[i] = maxIdx == i ? (T) 1.0 : (T) 0.0; } } else { //#pragma omp parallel for reduction(max:maxValue,maxIdx) default(shared) for (int i = 0; i < tadLength; i++) { if (rX[i * tadEWS] > maxValue) { maxIdx = i; maxValue = rX[i * tadEWS]; } } #pragma omp simd for (int i = 0; i < tadLength; i++) { rZ[i * zEWS] = maxIdx == i ? 
(T) 1.0 : (T) 0.0; } } } else { int tadsPerThread = tads / TAD_THRESHOLD; int num_threads = nd4j::math::nd4j_max<int>(1, tadsPerThread); num_threads = nd4j::math::nd4j_min<int>(num_threads, omp_get_max_threads()); auto offset = tadOffsets[r]; Nd4jLong shapeIter[MAX_RANK]; Nd4jLong coord[MAX_RANK]; int dim; Nd4jLong xStridesIter[MAX_RANK]; Nd4jLong resultStridesIter[MAX_RANK]; auto xShape = shape::shapeOf(tadShapeShapeInfo); auto xStride = shape::stride(tadShapeShapeInfo); auto resultStride = shape::stride(tadShapeShapeInfo); int rank = shape::rank(tadShapeShapeInfo); T *xPointer = dx + offset; T *resultPointer = result + offset; T maxValue = xPointer[0]; T *maxCursor = resultPointer; Nd4jPointer maxCursorLong = reinterpret_cast<Nd4jPointer>(maxCursor); if (PrepareTwoRawArrayIter<T>(rank, xShape, xPointer, xStride, resultPointer, resultStride, &rank, shapeIter, &xPointer, xStridesIter, &resultPointer, resultStridesIter) >= 0) { ND4J_RAW_ITER_START(dim, rank, coord, shapeIter); { if (maxValue < xPointer[0]) { maxCursor = resultPointer; maxCursorLong = reinterpret_cast<Nd4jPointer>(resultPointer); maxValue = xPointer[0]; } resultPointer[0] = 0.0; } ND4J_RAW_ITER_TWO_NEXT(dim, rank, coord, shapeIter, xPointer, xStridesIter, resultPointer, resultStridesIter); maxCursor = reinterpret_cast<T *>(maxCursorLong); maxCursor[0] = 1.0; } } } } delete[] dimension; } } op_def static T op(T d1, T *params) { return nd4j::math::softplus<T>(d1); } }; }
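/*
 * The softmax-derivative execSpecial paths above all follow the standard
 * numerically stable recipe: subtract the row maximum, exponentiate,
 * normalize by the sum, then finish with s * (1 - s).  Below is a minimal,
 * self-contained C sketch of that pipeline for one contiguous row -- the
 * elementWiseStride == 1 fast path, stripped of the nd4j shape machinery.
 * The function name is illustrative, not part of the library.
 */
#include <math.h>
#include <stddef.h>

static void softmax_derivative_row(float *row, size_t n)
{
    float max = row[0], sum = 0.0f;

    for (size_t i = 1; i < n; i++)      /* row-wise max for stability */
        if (row[i] > max)
            max = row[i];
    for (size_t i = 0; i < n; i++) {    /* shift and exponentiate */
        row[i] = expf(row[i] - max);
        sum += row[i];
    }
    for (size_t i = 0; i < n; i++) {    /* normalize, then s * (1 - s) */
        row[i] /= sum;
        row[i] = row[i] * (1.0f - row[i]);
    }
}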
ve_urpc.c
/**
 * TODO: add License header
 *
 * VE specific parts of VE-URPC.
 *
 * (C)opyright 2020 Erich Focht
 */

#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <assert.h>
#include <errno.h>
#include <sys/mman.h>

#ifdef _OPENMP
#include <omp.h>
#endif
#include <pthread.h>

#include <vhshm.h>
#include <vedma.h>

#include "urpc_common.h"
#include "urpc_time.h"

/*
  Initialize VH-SHM segment, map it as VEHVA.
*/
static int vhshm_register(urpc_peer_t *up)
{
    dprintf("VE: shm_segid = %d\n", up->shm_segid);

    //
    // attach shared memory VH address space and register it to DMAATB;
    // the region is accessible for DMA under its VEHVA remote_vehva
    //
    up->shm_addr = vh_shmat(up->shm_segid, NULL, 0, (void **)&up->shm_vehva);
    if (up->shm_addr == NULL) {
        eprintf("VE: (shm_addr == NULL)\n");
        return -ENOMEM;
    }
    if (up->shm_vehva == (uint64_t)-1) {
        eprintf("VE: failed to attach to shm segment %d, shm_vehva=-1\n",
                up->shm_segid);
        return -ENOMEM;
    }
    return 0;
}

/**
 * @brief Pin the thread to a core. In case of OpenMP: pin all threads to consecutive cores.
 *
 * The VE side process must be pinned because a thread may not migrate away
 * from the core whose DMA descriptor set it is using.
 *
 * @param core the core of the main thread or thread #0 (for OpenMP)
 */
static void _pin_threads_to_cores(int core)
{
#ifdef _OPENMP
#pragma omp parallel
    {
        int thr = omp_get_thread_num();
        cpu_set_t set;
        memset(&set, 0, sizeof(cpu_set_t));
        set.__bits[0] = (1UL << (thr + core));
        pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &set);
    }
#else
    cpu_set_t set;
    memset(&set, 0, sizeof(cpu_set_t));
    set.__bits[0] = (1UL << core);
    pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &set);
#endif
}

/**
 * @brief Unpin the thread(s).
 *
 * This function is needed when spawning a new pthread inside a VE handler (NEWPEER).
 * Without unpinning, the new pthread inherits the parent's mask and is scheduled on
 * the same core, competing with the parent.
 */
void ve_urpc_unpin(void)
{
    uint64_t mask = (1UL << MAX_VE_CORES) - 1;
#ifdef _OPENMP
#pragma omp parallel
    {
        cpu_set_t set;
        memset(&set, 0, sizeof(cpu_set_t));
        set.__bits[0] = mask;
        pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &set);
    }
#else
    cpu_set_t set;
    memset(&set, 0, sizeof(cpu_set_t));
    set.__bits[0] = mask;
    pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &set);
#endif
}

static void ve_urpc_comm_init(urpc_comm_t *uc)
{
    uc->mem[0].begin = 0;
    uc->mem[0].end = DATA_BUFF_END;
    uc->mem[1].begin = 0;
    uc->mem[1].end = 0;
    uc->active = &uc->mem[0];
    pthread_mutex_init(&uc->lock, NULL);
}

// TODO: add pinning to a VE core!
urpc_peer_t *ve_urpc_init(int segid)
{
    int err = 0;
    char *e;
    long syscall(long n, ...);
    pid_t tid = syscall(SYS_gettid);

    if (getpid() != tid) {
        eprintf("You called ve_urpc_init() inside a forked/cloned thread.\n");
        eprintf("VE DMA registration must be called from the main thread!\n");
        return NULL;
    }

    urpc_peer_t *up = (urpc_peer_t *)malloc(sizeof(urpc_peer_t));
    if (up == NULL) {
        eprintf("unable to allocate urpc_peer struct memory\n");
        errno = ENOMEM;
        return NULL;
    }

    //
    // shm_segid is either in argument or in environment variable
    //
    if (segid) {
        up->shm_segid = segid;
    } else {
        if ((e = getenv("URPC_SHM_SEGID")) != NULL)
            up->shm_segid = atol(e);
        else {
            eprintf("env variable URPC_SHM_SEGID not found.\n");
            free(up);
            errno = ENOENT;
            return NULL;
        }
    }

    // find and register shm segment
    err = vhshm_register(up);
    if (err) {
        free(up);
        eprintf("VE: vh_shm_register failed, err=%d.\n", err);
        return NULL;
    }

    up->recv.tq = (transfer_queue_t *)(up->shm_vehva);
    up->send.tq = (transfer_queue_t *)(up->shm_vehva + URPC_BUFF_LEN);
    up->recv.shm_data_vehva = up->shm_vehva + offsetof(transfer_queue_t, data);
    up->send.shm_data_vehva = up->shm_vehva + URPC_BUFF_LEN
                              + offsetof(transfer_queue_t, data);

    ve_urpc_comm_init(&up->send);

    char *buff_base;
    uint64_t buff_base_vehva;
    size_t align_64mb = 64 * 1024 * 1024;
    size_t buff_size = 2 * URPC_BUFF_LEN;
    buff_size = (buff_size + align_64mb - 1) & ~(align_64mb - 1);

    // allocate read and write buffers in one call;
    // posix_memalign() signals failure through its return value,
    // so check that instead of the pointer
    if (posix_memalign(&up->mirr_buff, align_64mb, buff_size) != 0) {
        eprintf("VE: allocating urpc mirror buffer failed! buffsize=%lu\n",
                buff_size);
        free(up);
        errno = ENOMEM;
        return NULL;
    }
    dprintf("ve allocated buff at %p, size=%lu\n", up->mirr_buff, buff_size);
    buff_base = (char *)up->mirr_buff;
    buff_base_vehva = ve_register_mem_to_dmaatb(buff_base, buff_size);
    if (buff_base_vehva == (uint64_t)-1) {
        eprintf("VE: mapping urpc mirror buffer failed! buffsize=%lu\n",
                buff_size);
        free(up->mirr_buff);
        free(up);
        errno = ENOMEM;
        return NULL;
    }
    dprintf("ve_register_mem_to_dmaatb succeeded for %p\n", buff_base);

    up->recv.mirr_data_buff = buff_base + offsetof(transfer_queue_t, data);
    up->send.mirr_data_buff = buff_base + URPC_BUFF_LEN
                              + offsetof(transfer_queue_t, data);

    up->recv.mirr_data_vehva = buff_base_vehva + offsetof(transfer_queue_t, data);
    up->send.mirr_data_vehva = buff_base_vehva + URPC_BUFF_LEN
                               + offsetof(transfer_queue_t, data);

    // initialize handler table
    for (int i = 0; i <= URPC_MAX_HANDLERS; i++)
        up->handler[i] = NULL;
    urpc_run_handler_init_hooks(up);	// don't remove this

    up->core = -1;
    return up;
}

/**
 * We need this split of the init because VE-SHM functions can only be done
 * in the main thread.
 */
int ve_urpc_init_dma(urpc_peer_t *up, int core)
{
    char *e;

    // pinning to a VE core must happen before initializing UDMA
    if (core < 0 && (e = getenv("URPC_VE_CORE")) != NULL) {
        core = atoi(e);
    }
    if (core >= 0)
        _pin_threads_to_cores(core);
    up->core = core;

    // Initialize DMA
    int err = ve_dma_init();
    if (err) {
        eprintf("Failed to initialize DMA\n");
        return -1;
    }
    return 0;
}

void ve_urpc_fini(urpc_peer_t *up)
{
    int err;

    // unregister local buffer from DMAATB
    err = ve_unregister_mem_from_dmaatb(up->recv.mirr_data_vehva -
                                        offsetof(transfer_queue_t, data));
    if (err)
        eprintf("VE: Failed to unregister local buffer from DMAATB\n");

    // free the mirror buffer
    free(up->mirr_buff);

    // detach VH sysV shm segment
    if (up->shm_addr) {
        err = vh_shmdt(up->shm_addr);
        if (err)
            eprintf("VE: Failed to detach from VH sysV shm\n");
        else {
            up->shm_addr = NULL;
            up->shm_vehva = 0;
        }
    }
    free(up);
}

/*
  Transfer a buffer to/from the SHM area. The transfer length must be smaller
  than the maximum transfer doable with one DMA descriptor (<128 MiB).
*/
int ve_transfer_data_sync(uint64_t dst_vehva, uint64_t src_vehva, int len)
{
    int err;

    err = ve_dma_post_wait(dst_vehva, src_vehva, len);
    if (err) {
        pid_t pid = getpid();
        eprintf("DMA encountered exception, rc=0x%x\n", err);
        if (err & 0x8000) {
            eprintf("memory protection exception\n");
        } else if (err & 0x4000) {
            eprintf("memory protection exception\n");
        } else if (err & 0x2000) {
            eprintf("missing space exception\n");
        } else if (err & 0x1000) {
            eprintf("memory access exception\n");
        } else if (err & 0x0800) {
            eprintf("I/O access exception\n");
        }
        eprintf("Sleeping for 40s such that you can attach a debugger.\n");
        eprintf("Command: /opt/nec/ve/bin/gdb -p %d\n", pid);
        sleep(40);
    }
    return err;
}

/*
  URPC progress function. Process at most 'ncmds' requests from the RECV
  communicator. Returns the number of requests processed.
*/
int ve_urpc_recv_progress(urpc_peer_t *up, int ncmds)
{
    int done = 0;
    urpc_mb_t m;
    urpc_comm_t *uc = &up->recv;
    transfer_queue_t *tq = uc->tq;
    urpc_handler_func func = NULL;
    void *payload;
    size_t plen;
    int err;

    while (done < ncmds) {
        int64_t req = urpc_get_cmd(tq, &m);
        if (req < 0)
            break;
        //
        // set/receive payload, if needed
        //
        set_recv_payload(uc, &m, &payload, &plen);
        //
        // call handler
        //
        func = up->handler[m.c.cmd];
        if (func) {
            err = func(up, &m, req, payload, plen);
            if (err)
                eprintf("Warning: RPC handler %d returned %d\n", m.c.cmd, err);
        }
        urpc_slot_done(tq, REQ2SLOT(req), &m);
        ++done;
    }
    return done;
}

/*
  Progress loop with timeout. Returns the total number of requests processed.
*/
int ve_urpc_recv_progress_timeout(urpc_peer_t *up, int ncmds, long timeout_us)
{
    long done_ts = 0;
    int total = 0;

    do {
        int done = ve_urpc_recv_progress(up, ncmds);
        total += done;
        if (done == 0) {
            if (done_ts == 0)
                done_ts = get_time_us();
        } else
            done_ts = 0;
    } while (done_ts == 0 || timediff_us(done_ts) < timeout_us);
    return total;
}
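/*
 * A hedged sketch of how the pieces above fit together on the VE side,
 * assuming handlers may be registered by writing the handler table directly.
 * The command id 42, my_handler and ve_side_example are purely illustrative:
 * ve_urpc_init() attaches the shared memory, ve_urpc_init_dma() pins the
 * thread(s) and sets up UDMA, and the progress loop serves requests until
 * none arrive within the timeout.
 */
#include <stddef.h>
#include <stdint.h>
#include "urpc_common.h"

/* the signature follows the call site in ve_urpc_recv_progress() above */
static int my_handler(urpc_peer_t *up, urpc_mb_t *m, int64_t req,
                      void *payload, size_t plen)
{
    /* ... process the request payload ... */
    return 0;
}

int ve_side_example(void)
{
    urpc_peer_t *up = ve_urpc_init(0);  /* 0: take segid from URPC_SHM_SEGID */
    if (up == NULL)
        return -1;
    if (ve_urpc_init_dma(up, -1)) {     /* -1: core from URPC_VE_CORE */
        ve_urpc_fini(up);
        return -1;
    }
    up->handler[42] = my_handler;       /* illustrative command id */
    /* serve up to 16 requests per pass; stop after 1s of inactivity */
    ve_urpc_recv_progress_timeout(up, 16, 1000000);
    ve_urpc_fini(up);
    return 0;
}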
fourier.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % FFFFF OOO U U RRRR IIIII EEEEE RRRR % % F O O U U R R I E R R % % FFF O O U U RRRR I EEE RRRR % % F O O U U R R I E R R % % F OOO UUU R R IIIII EEEEE R R % % % % % % MagickCore Discrete Fourier Transform Methods % % % % Software Design % % Sean Burke % % Fred Weinhaus % % Cristy % % July 2009 % % % % % % Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "magick/studio.h" #include "magick/artifact.h" #include "magick/attribute.h" #include "magick/blob.h" #include "magick/cache.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/list.h" #include "magick/fourier.h" #include "magick/log.h" #include "magick/memory_.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/pixel-accessor.h" #include "magick/pixel-private.h" #include "magick/property.h" #include "magick/quantum-private.h" #include "magick/resource_.h" #include "magick/string-private.h" #include "magick/thread-private.h" #if defined(MAGICKCORE_FFTW_DELEGATE) #if defined(MAGICKCORE_HAVE_COMPLEX_H) #include <complex.h> #endif #include <fftw3.h> #if !defined(MAGICKCORE_HAVE_CABS) #define cabs(z) (sqrt(z[0]*z[0]+z[1]*z[1])) #endif #if !defined(MAGICKCORE_HAVE_CARG) #define carg(z) (atan2(cimag(z),creal(z))) #endif #if !defined(MAGICKCORE_HAVE_CIMAG) #define cimag(z) (z[1]) #endif #if !defined(MAGICKCORE_HAVE_CREAL) #define creal(z) (z[0]) #endif #endif /* Typedef declarations. */ typedef struct _FourierInfo { ChannelType channel; MagickBooleanType modulus; size_t width, height; ssize_t center; } FourierInfo; /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o m p l e x I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ComplexImages() performs complex mathematics on an image sequence. % % The format of the ComplexImages method is: % % MagickBooleanType ComplexImages(Image *images,const ComplexOperator op, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o op: A complex operator. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport Image *ComplexImages(const Image *images,const ComplexOperator op, ExceptionInfo *exception) { #define ComplexImageTag "Complex/Image" CacheView *Ai_view, *Ar_view, *Bi_view, *Br_view, *Ci_view, *Cr_view; const char *artifact; const Image *Ai_image, *Ar_image, *Bi_image, *Br_image; double snr; Image *Ci_image, *complex_images, *Cr_image, *image; MagickBooleanType status; MagickOffsetType progress; size_t columns, rows; ssize_t y; assert(images != (Image *) NULL); assert(images->signature == MagickCoreSignature); if (images->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if (images->next == (Image *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),ImageError, "ImageSequenceRequired","`%s'",images->filename); return((Image *) NULL); } image=CloneImage(images,0,0,MagickTrue,exception); if (image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(image,DirectClass) == MagickFalse) { image=DestroyImageList(image); return(image); } image->depth=32UL; complex_images=NewImageList(); AppendImageToList(&complex_images,image); image=CloneImage(images->next,0,0,MagickTrue,exception); if (image == (Image *) NULL) { complex_images=DestroyImageList(complex_images); return(complex_images); } if (SetImageStorageClass(image,DirectClass) == MagickFalse) { image=DestroyImageList(image); return(image); } image->depth=32UL; AppendImageToList(&complex_images,image); /* Apply complex mathematics to image pixels. */ artifact=GetImageArtifact(image,"complex:snr"); snr=0.0; if (artifact != (const char *) NULL) snr=StringToDouble(artifact,(char **) NULL); Ar_image=images; Ai_image=images->next; Br_image=images; Bi_image=images->next; if ((images->next->next != (Image *) NULL) && (images->next->next->next != (Image *) NULL)) { Br_image=images->next->next; Bi_image=images->next->next->next; } Cr_image=complex_images; Ci_image=complex_images->next; Ar_view=AcquireVirtualCacheView(Ar_image,exception); Ai_view=AcquireVirtualCacheView(Ai_image,exception); Br_view=AcquireVirtualCacheView(Br_image,exception); Bi_view=AcquireVirtualCacheView(Bi_image,exception); Cr_view=AcquireAuthenticCacheView(Cr_image,exception); Ci_view=AcquireAuthenticCacheView(Ci_image,exception); status=MagickTrue; progress=0; columns=MagickMin(Cr_image->columns,Ci_image->columns); rows=MagickMin(Cr_image->rows,Ci_image->rows); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(Cr_image,complex_images,rows,1L) #endif for (y=0; y < (ssize_t) rows; y++) { const PixelPacket *magick_restrict Ai, *magick_restrict Ar, *magick_restrict Bi, *magick_restrict Br; PixelPacket *magick_restrict Ci, *magick_restrict Cr; ssize_t x; if (status == MagickFalse) continue; Ar=GetCacheViewVirtualPixels(Ar_view,0,y,columns,1,exception); Ai=GetCacheViewVirtualPixels(Ai_view,0,y,columns,1,exception); Br=GetCacheViewVirtualPixels(Br_view,0,y,columns,1,exception); Bi=GetCacheViewVirtualPixels(Bi_view,0,y,columns,1,exception); Cr=QueueCacheViewAuthenticPixels(Cr_view,0,y,columns,1,exception); Ci=QueueCacheViewAuthenticPixels(Ci_view,0,y,columns,1,exception); if ((Ar == (const PixelPacket *) NULL) || (Ai == (const PixelPacket *) NULL) || (Br == (const PixelPacket *) NULL) || (Bi == (const PixelPacket *) NULL) || (Cr == (PixelPacket *) NULL) || (Ci == (PixelPacket *) NULL)) { status=MagickFalse; continue; } 
for (x=0; x < (ssize_t) columns; x++) { switch (op) { case AddComplexOperator: { Cr->red=Ar->red+Br->red; Ci->red=Ai->red+Bi->red; Cr->green=Ar->green+Br->green; Ci->green=Ai->green+Bi->green; Cr->blue=Ar->blue+Br->blue; Ci->blue=Ai->blue+Bi->blue; if (images->matte != MagickFalse) { Cr->opacity=Ar->opacity+Br->opacity; Ci->opacity=Ai->opacity+Bi->opacity; } break; } case ConjugateComplexOperator: default: { Cr->red=Ar->red; Ci->red=(-Ai->red); Cr->green=Ar->green; Ci->green=(-Ai->green); Cr->blue=Ar->blue; Ci->blue=(-Ai->blue); if (images->matte != MagickFalse) { Cr->opacity=Ar->opacity; Ci->opacity=(-Ai->opacity); } break; } case DivideComplexOperator: { double gamma; gamma=QuantumRange*PerceptibleReciprocal(QuantumScale*Br->red*Br->red+ QuantumScale*Bi->red*Bi->red+snr); Cr->red=gamma*(QuantumScale*Ar->red*Br->red+QuantumScale*Ai->red* Bi->red); Ci->red=gamma*(QuantumScale*Ai->red*Br->red-QuantumScale*Ar->red* Bi->red); gamma=QuantumRange*PerceptibleReciprocal(QuantumScale*Br->green* Br->green+QuantumScale*Bi->green*Bi->green+snr); Cr->green=gamma*(QuantumScale*Ar->green*Br->green+QuantumScale* Ai->green*Bi->green); Ci->green=gamma*(QuantumScale*Ai->green*Br->green-QuantumScale* Ar->green*Bi->green); gamma=QuantumRange*PerceptibleReciprocal(QuantumScale*Br->blue* Br->blue+QuantumScale*Bi->blue*Bi->blue+snr); Cr->blue=gamma*(QuantumScale*Ar->blue*Br->blue+QuantumScale* Ai->blue*Bi->blue); Ci->blue=gamma*(QuantumScale*Ai->blue*Br->blue-QuantumScale* Ar->blue*Bi->blue); if (images->matte != MagickFalse) { gamma=QuantumRange*PerceptibleReciprocal(QuantumScale*Br->opacity* Br->opacity+QuantumScale*Bi->opacity*Bi->opacity+snr); Cr->opacity=gamma*(QuantumScale*Ar->opacity*Br->opacity+ QuantumScale*Ai->opacity*Bi->opacity); Ci->opacity=gamma*(QuantumScale*Ai->opacity*Br->opacity- QuantumScale*Ar->opacity*Bi->opacity); } break; } case MagnitudePhaseComplexOperator: { Cr->red=sqrt(QuantumScale*Ar->red*Ar->red+QuantumScale* Ai->red*Ai->red); Ci->red=atan2((double) Ai->red,(double) Ar->red)/(2.0*MagickPI)+0.5; Cr->green=sqrt(QuantumScale*Ar->green*Ar->green+QuantumScale* Ai->green*Ai->green); Ci->green=atan2((double) Ai->green,(double) Ar->green)/ (2.0*MagickPI)+0.5; Cr->blue=sqrt(QuantumScale*Ar->blue*Ar->blue+QuantumScale* Ai->blue*Ai->blue); Ci->blue=atan2(Ai->blue,Ar->blue)/(2.0*MagickPI)+0.5; if (images->matte != MagickFalse) { Cr->opacity=sqrt(QuantumScale*Ar->opacity*Ar->opacity+ QuantumScale*Ai->opacity*Ai->opacity); Ci->opacity=atan2((double) Ai->opacity,(double) Ar->opacity)/ (2.0*MagickPI)+0.5; } break; } case MultiplyComplexOperator: { Cr->red=(QuantumScale*Ar->red*Br->red-(double) Ai->red*Bi->red); Ci->red=(QuantumScale*Ai->red*Br->red+(double) Ar->red*Bi->red); Cr->green=(QuantumScale*Ar->green*Br->green-(double) Ai->green*Bi->green); Ci->green=(QuantumScale*Ai->green*Br->green+(double) Ar->green*Bi->green); Cr->blue=(QuantumScale*Ar->blue*Br->blue-(double) Ai->blue*Bi->blue); Ci->blue=(QuantumScale*Ai->blue*Br->blue+(double) Ar->blue*Bi->blue); if (images->matte != MagickFalse) { Cr->opacity=(QuantumScale*Ar->opacity*Br->opacity- QuantumScale*Ai->opacity*Bi->opacity); Ci->opacity=(QuantumScale*Ai->opacity*Br->opacity+ QuantumScale*Ar->opacity*Bi->opacity); } break; } case RealImaginaryComplexOperator: { Cr->red=Ar->red*cos(2.0*MagickPI*(Ai->red-0.5)); Ci->red=Ar->red*sin(2.0*MagickPI*(Ai->red-0.5)); Cr->green=Ar->green*cos(2.0*MagickPI*(Ai->green-0.5)); Ci->green=Ar->green*sin(2.0*MagickPI*(Ai->green-0.5)); Cr->blue=Ar->blue*cos(2.0*MagickPI*(Ai->blue-0.5)); 
Ci->blue=Ar->blue*sin(2.0*MagickPI*(Ai->blue-0.5)); if (images->matte != MagickFalse) { Cr->opacity=Ar->opacity*cos(2.0*MagickPI*(Ai->opacity-0.5)); Ci->opacity=Ar->opacity*sin(2.0*MagickPI*(Ai->opacity-0.5)); } break; } case SubtractComplexOperator: { Cr->red=Ar->red-Br->red; Ci->red=Ai->red-Bi->red; Cr->green=Ar->green-Br->green; Ci->green=Ai->green-Bi->green; Cr->blue=Ar->blue-Br->blue; Ci->blue=Ai->blue-Bi->blue; if (Cr_image->matte != MagickFalse) { Cr->opacity=Ar->opacity-Br->opacity; Ci->opacity=Ai->opacity-Bi->opacity; } break; } } Ar++; Ai++; Br++; Bi++; Cr++; Ci++; } if (SyncCacheViewAuthenticPixels(Ci_view,exception) == MagickFalse) status=MagickFalse; if (SyncCacheViewAuthenticPixels(Cr_view,exception) == MagickFalse) status=MagickFalse; if (images->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(images,ComplexImageTag,progress,images->rows); if (proceed == MagickFalse) status=MagickFalse; } } Cr_view=DestroyCacheView(Cr_view); Ci_view=DestroyCacheView(Ci_view); Br_view=DestroyCacheView(Br_view); Bi_view=DestroyCacheView(Bi_view); Ar_view=DestroyCacheView(Ar_view); Ai_view=DestroyCacheView(Ai_view); if (status == MagickFalse) complex_images=DestroyImageList(complex_images); return(complex_images); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % F o r w a r d F o u r i e r T r a n s f o r m I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ForwardFourierTransformImage() implements the discrete Fourier transform % (DFT) of the image either as a magnitude / phase or real / imaginary image % pair. % % The format of the ForwadFourierTransformImage method is: % % Image *ForwardFourierTransformImage(const Image *image, % const MagickBooleanType modulus,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o modulus: if true, return as transform as a magnitude / phase pair % otherwise a real / imaginary image pair. % % o exception: return any errors or warnings in this structure. % */ #if defined(MAGICKCORE_FFTW_DELEGATE) static MagickBooleanType RollFourier(const size_t width,const size_t height, const ssize_t x_offset,const ssize_t y_offset,double *roll_pixels) { double *source_pixels; MemoryInfo *source_info; ssize_t i, x; ssize_t u, v, y; /* Move zero frequency (DC, average color) from (0,0) to (width/2,height/2). */ source_info=AcquireVirtualMemory(width,height*sizeof(*source_pixels)); if (source_info == (MemoryInfo *) NULL) return(MagickFalse); source_pixels=(double *) GetVirtualMemoryBlob(source_info); i=0L; for (y=0L; y < (ssize_t) height; y++) { if (y_offset < 0L) v=((y+y_offset) < 0L) ? y+y_offset+(ssize_t) height : y+y_offset; else v=((y+y_offset) > ((ssize_t) height-1L)) ? y+y_offset-(ssize_t) height : y+y_offset; for (x=0L; x < (ssize_t) width; x++) { if (x_offset < 0L) u=((x+x_offset) < 0L) ? x+x_offset+(ssize_t) width : x+x_offset; else u=((x+x_offset) > ((ssize_t) width-1L)) ? 
x+x_offset-(ssize_t) width : x+x_offset; source_pixels[v*width+u]=roll_pixels[i++]; } } (void) memcpy(roll_pixels,source_pixels,height*width*sizeof(*source_pixels)); source_info=RelinquishVirtualMemory(source_info); return(MagickTrue); } static MagickBooleanType ForwardQuadrantSwap(const size_t width, const size_t height,double *source_pixels,double *forward_pixels) { MagickBooleanType status; ssize_t x; ssize_t center, y; /* Swap quadrants. */ center=(ssize_t) (width/2L)+1L; status=RollFourier((size_t) center,height,0L,(ssize_t) height/2L, source_pixels); if (status == MagickFalse) return(MagickFalse); for (y=0L; y < (ssize_t) height; y++) for (x=0L; x < (ssize_t) (width/2L); x++) forward_pixels[y*width+x+width/2L]=source_pixels[y*center+x]; for (y=1; y < (ssize_t) height; y++) for (x=0L; x < (ssize_t) (width/2L); x++) forward_pixels[(height-y)*width+width/2L-x-1L]= source_pixels[y*center+x+1L]; for (x=0L; x < (ssize_t) (width/2L); x++) forward_pixels[width/2L-x-1L]=source_pixels[x+1L]; return(MagickTrue); } static void CorrectPhaseLHS(const size_t width,const size_t height, double *fourier_pixels) { ssize_t x; ssize_t y; for (y=0L; y < (ssize_t) height; y++) for (x=0L; x < (ssize_t) (width/2L); x++) fourier_pixels[y*width+x]*=(-1.0); } static MagickBooleanType ForwardFourier(const FourierInfo *fourier_info, Image *image,double *magnitude,double *phase,ExceptionInfo *exception) { CacheView *magnitude_view, *phase_view; double *magnitude_pixels, *phase_pixels; Image *magnitude_image, *phase_image; MagickBooleanType status; MemoryInfo *magnitude_info, *phase_info; IndexPacket *indexes; PixelPacket *q; ssize_t x; ssize_t i, y; magnitude_image=GetFirstImageInList(image); phase_image=GetNextImageInList(image); if (phase_image == (Image *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),ImageError, "ImageSequenceRequired","`%s'",image->filename); return(MagickFalse); } /* Create "Fourier Transform" image from constituent arrays. 
*/ magnitude_info=AcquireVirtualMemory((size_t) fourier_info->width, fourier_info->height*sizeof(*magnitude_pixels)); phase_info=AcquireVirtualMemory((size_t) fourier_info->width, fourier_info->height*sizeof(*phase_pixels)); if ((magnitude_info == (MemoryInfo *) NULL) || (phase_info == (MemoryInfo *) NULL)) { if (phase_info != (MemoryInfo *) NULL) phase_info=RelinquishVirtualMemory(phase_info); if (magnitude_info != (MemoryInfo *) NULL) magnitude_info=RelinquishVirtualMemory(magnitude_info); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(MagickFalse); } magnitude_pixels=(double *) GetVirtualMemoryBlob(magnitude_info); (void) memset(magnitude_pixels,0,fourier_info->width* fourier_info->height*sizeof(*magnitude_pixels)); phase_pixels=(double *) GetVirtualMemoryBlob(phase_info); (void) memset(phase_pixels,0,fourier_info->width* fourier_info->height*sizeof(*phase_pixels)); status=ForwardQuadrantSwap(fourier_info->width,fourier_info->height, magnitude,magnitude_pixels); if (status != MagickFalse) status=ForwardQuadrantSwap(fourier_info->width,fourier_info->height,phase, phase_pixels); CorrectPhaseLHS(fourier_info->width,fourier_info->height,phase_pixels); if (fourier_info->modulus != MagickFalse) { i=0L; for (y=0L; y < (ssize_t) fourier_info->height; y++) for (x=0L; x < (ssize_t) fourier_info->width; x++) { phase_pixels[i]/=(2.0*MagickPI); phase_pixels[i]+=0.5; i++; } } magnitude_view=AcquireAuthenticCacheView(magnitude_image,exception); i=0L; for (y=0L; y < (ssize_t) fourier_info->height; y++) { q=GetCacheViewAuthenticPixels(magnitude_view,0L,y,fourier_info->width,1UL, exception); if (q == (PixelPacket *) NULL) break; indexes=GetCacheViewAuthenticIndexQueue(magnitude_view); for (x=0L; x < (ssize_t) fourier_info->width; x++) { switch (fourier_info->channel) { case RedChannel: default: { SetPixelRed(q,ClampToQuantum(QuantumRange*magnitude_pixels[i])); break; } case GreenChannel: { SetPixelGreen(q,ClampToQuantum(QuantumRange*magnitude_pixels[i])); break; } case BlueChannel: { SetPixelBlue(q,ClampToQuantum(QuantumRange*magnitude_pixels[i])); break; } case OpacityChannel: { SetPixelOpacity(q,ClampToQuantum(QuantumRange*magnitude_pixels[i])); break; } case IndexChannel: { SetPixelIndex(indexes+x,ClampToQuantum(QuantumRange* magnitude_pixels[i])); break; } case GrayChannels: { SetPixelGray(q,ClampToQuantum(QuantumRange*magnitude_pixels[i])); break; } } i++; q++; } status=SyncCacheViewAuthenticPixels(magnitude_view,exception); if (status == MagickFalse) break; } magnitude_view=DestroyCacheView(magnitude_view); i=0L; phase_view=AcquireAuthenticCacheView(phase_image,exception); for (y=0L; y < (ssize_t) fourier_info->height; y++) { q=GetCacheViewAuthenticPixels(phase_view,0L,y,fourier_info->width,1UL, exception); if (q == (PixelPacket *) NULL) break; indexes=GetCacheViewAuthenticIndexQueue(phase_view); for (x=0L; x < (ssize_t) fourier_info->width; x++) { switch (fourier_info->channel) { case RedChannel: default: { SetPixelRed(q,ClampToQuantum(QuantumRange*phase_pixels[i])); break; } case GreenChannel: { SetPixelGreen(q,ClampToQuantum(QuantumRange*phase_pixels[i])); break; } case BlueChannel: { SetPixelBlue(q,ClampToQuantum(QuantumRange*phase_pixels[i])); break; } case OpacityChannel: { SetPixelOpacity(q,ClampToQuantum(QuantumRange*phase_pixels[i])); break; } case IndexChannel: { SetPixelIndex(indexes+x,ClampToQuantum(QuantumRange*phase_pixels[i])); break; } case GrayChannels: { 
SetPixelGray(q,ClampToQuantum(QuantumRange*phase_pixels[i])); break; } } i++; q++; } status=SyncCacheViewAuthenticPixels(phase_view,exception); if (status == MagickFalse) break; } phase_view=DestroyCacheView(phase_view); phase_info=RelinquishVirtualMemory(phase_info); magnitude_info=RelinquishVirtualMemory(magnitude_info); return(status); } static MagickBooleanType ForwardFourierTransform(FourierInfo *fourier_info, const Image *image,double *magnitude_pixels,double *phase_pixels, ExceptionInfo *exception) { CacheView *image_view; const char *value; double *source_pixels; fftw_complex *forward_pixels; fftw_plan fftw_r2c_plan; MemoryInfo *forward_info, *source_info; const IndexPacket *indexes; const PixelPacket *p; ssize_t i, x; ssize_t y; /* Generate the forward Fourier transform. */ source_info=AcquireVirtualMemory((size_t) fourier_info->width, fourier_info->height*sizeof(*source_pixels)); if (source_info == (MemoryInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(MagickFalse); } source_pixels=(double *) GetVirtualMemoryBlob(source_info); memset(source_pixels,0,fourier_info->width*fourier_info->height* sizeof(*source_pixels)); i=0L; image_view=AcquireVirtualCacheView(image,exception); for (y=0L; y < (ssize_t) fourier_info->height; y++) { p=GetCacheViewVirtualPixels(image_view,0L,y,fourier_info->width,1UL, exception); if (p == (const PixelPacket *) NULL) break; indexes=GetCacheViewVirtualIndexQueue(image_view); for (x=0L; x < (ssize_t) fourier_info->width; x++) { switch (fourier_info->channel) { case RedChannel: default: { source_pixels[i]=QuantumScale*GetPixelRed(p); break; } case GreenChannel: { source_pixels[i]=QuantumScale*GetPixelGreen(p); break; } case BlueChannel: { source_pixels[i]=QuantumScale*GetPixelBlue(p); break; } case OpacityChannel: { source_pixels[i]=QuantumScale*GetPixelOpacity(p); break; } case IndexChannel: { source_pixels[i]=QuantumScale*GetPixelIndex(indexes+x); break; } case GrayChannels: { source_pixels[i]=QuantumScale*GetPixelGray(p); break; } } i++; p++; } } image_view=DestroyCacheView(image_view); forward_info=AcquireVirtualMemory((size_t) fourier_info->width, (fourier_info->height/2+1)*sizeof(*forward_pixels)); if (forward_info == (MemoryInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); source_info=(MemoryInfo *) RelinquishVirtualMemory(source_info); return(MagickFalse); } forward_pixels=(fftw_complex *) GetVirtualMemoryBlob(forward_info); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_ForwardFourierTransform) #endif fftw_r2c_plan=fftw_plan_dft_r2c_2d(fourier_info->width,fourier_info->height, source_pixels,forward_pixels,FFTW_ESTIMATE); fftw_execute_dft_r2c(fftw_r2c_plan,source_pixels,forward_pixels); fftw_destroy_plan(fftw_r2c_plan); source_info=(MemoryInfo *) RelinquishVirtualMemory(source_info); value=GetImageArtifact(image,"fourier:normalize"); if ((value == (const char *) NULL) || (LocaleCompare(value,"forward") == 0)) { double gamma; /* Normalize forward transform. 
      */
      i=0L;
      gamma=PerceptibleReciprocal((double) fourier_info->width*
        fourier_info->height);
      for (y=0L; y < (ssize_t) fourier_info->height; y++)
        for (x=0L; x < (ssize_t) fourier_info->center; x++)
        {
#if defined(MAGICKCORE_HAVE_COMPLEX_H)
          forward_pixels[i]*=gamma;
#else
          forward_pixels[i][0]*=gamma;
          forward_pixels[i][1]*=gamma;
#endif
          i++;
        }
    }
  /*
    Generate magnitude and phase (or real and imaginary).
  */
  i=0L;
  if (fourier_info->modulus != MagickFalse)
    for (y=0L; y < (ssize_t) fourier_info->height; y++)
      for (x=0L; x < (ssize_t) fourier_info->center; x++)
      {
        magnitude_pixels[i]=cabs(forward_pixels[i]);
        phase_pixels[i]=carg(forward_pixels[i]);
        i++;
      }
  else
    for (y=0L; y < (ssize_t) fourier_info->height; y++)
      for (x=0L; x < (ssize_t) fourier_info->center; x++)
      {
        magnitude_pixels[i]=creal(forward_pixels[i]);
        phase_pixels[i]=cimag(forward_pixels[i]);
        i++;
      }
  forward_info=(MemoryInfo *) RelinquishVirtualMemory(forward_info);
  return(MagickTrue);
}

static MagickBooleanType ForwardFourierTransformChannel(const Image *image,
  const ChannelType channel,const MagickBooleanType modulus,
  Image *fourier_image,ExceptionInfo *exception)
{
  double
    *magnitude_pixels,
    *phase_pixels;

  FourierInfo
    fourier_info;

  MagickBooleanType
    status;

  MemoryInfo
    *magnitude_info,
    *phase_info;

  fourier_info.width=image->columns;
  fourier_info.height=image->rows;
  if ((image->columns != image->rows) || ((image->columns % 2) != 0) ||
      ((image->rows % 2) != 0))
    {
      size_t extent=image->columns < image->rows ? image->rows :
        image->columns;
      fourier_info.width=(extent & 0x01) == 1 ? extent+1UL : extent;
    }
  fourier_info.height=fourier_info.width;
  fourier_info.center=(ssize_t) (fourier_info.width/2L)+1L;
  fourier_info.channel=channel;
  fourier_info.modulus=modulus;
  magnitude_info=AcquireVirtualMemory((size_t) fourier_info.width,
    (fourier_info.height/2+1)*sizeof(*magnitude_pixels));
  phase_info=AcquireVirtualMemory((size_t) fourier_info.width,
    (fourier_info.height/2+1)*sizeof(*phase_pixels));
  if ((magnitude_info == (MemoryInfo *) NULL) ||
      (phase_info == (MemoryInfo *) NULL))
    {
      if (phase_info != (MemoryInfo *) NULL)
        phase_info=RelinquishVirtualMemory(phase_info);
      if (magnitude_info != (MemoryInfo *) NULL)
        magnitude_info=RelinquishVirtualMemory(magnitude_info);
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(MagickFalse);
    }
  magnitude_pixels=(double *) GetVirtualMemoryBlob(magnitude_info);
  phase_pixels=(double *) GetVirtualMemoryBlob(phase_info);
  status=ForwardFourierTransform(&fourier_info,image,magnitude_pixels,
    phase_pixels,exception);
  if (status != MagickFalse)
    status=ForwardFourier(&fourier_info,fourier_image,magnitude_pixels,
      phase_pixels,exception);
  phase_info=RelinquishVirtualMemory(phase_info);
  magnitude_info=RelinquishVirtualMemory(magnitude_info);
  return(status);
}
#endif

MagickExport Image *ForwardFourierTransformImage(const Image *image,
  const MagickBooleanType modulus,ExceptionInfo *exception)
{
  Image
    *fourier_image;

  fourier_image=NewImageList();
#if !defined(MAGICKCORE_FFTW_DELEGATE)
  (void) modulus;
  (void) ThrowMagickException(exception,GetMagickModule(),
    MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn","`%s' (FFTW)",
    image->filename);
#else
  {
    Image
      *magnitude_image;

    size_t
      height,
      width;

    width=image->columns;
    height=image->rows;
    if ((image->columns != image->rows) || ((image->columns % 2) != 0) ||
        ((image->rows % 2) != 0))
      {
        size_t extent=image->columns < image->rows ? image->rows :
          image->columns;
        width=(extent & 0x01) == 1 ?
extent+1UL : extent; } height=width; magnitude_image=CloneImage(image,width,height,MagickTrue,exception); if (magnitude_image != (Image *) NULL) { Image *phase_image; magnitude_image->storage_class=DirectClass; magnitude_image->depth=32UL; phase_image=CloneImage(image,width,height,MagickTrue,exception); if (phase_image == (Image *) NULL) magnitude_image=DestroyImage(magnitude_image); else { MagickBooleanType is_gray, status; phase_image->storage_class=DirectClass; phase_image->depth=32UL; AppendImageToList(&fourier_image,magnitude_image); AppendImageToList(&fourier_image,phase_image); status=MagickTrue; is_gray=IsGrayImage(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel sections #endif { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; if (is_gray != MagickFalse) thread_status=ForwardFourierTransformChannel(image, GrayChannels,modulus,fourier_image,exception); else thread_status=ForwardFourierTransformChannel(image,RedChannel, modulus,fourier_image,exception); if (thread_status == MagickFalse) status=thread_status; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; thread_status=MagickTrue; if (is_gray == MagickFalse) thread_status=ForwardFourierTransformChannel(image, GreenChannel,modulus,fourier_image,exception); if (thread_status == MagickFalse) status=thread_status; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; thread_status=MagickTrue; if (is_gray == MagickFalse) thread_status=ForwardFourierTransformChannel(image, BlueChannel,modulus,fourier_image,exception); if (thread_status == MagickFalse) status=thread_status; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; thread_status=MagickTrue; if (image->matte != MagickFalse) thread_status=ForwardFourierTransformChannel(image, OpacityChannel,modulus,fourier_image,exception); if (thread_status == MagickFalse) status=thread_status; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; thread_status=MagickTrue; if (image->colorspace == CMYKColorspace) thread_status=ForwardFourierTransformChannel(image, IndexChannel,modulus,fourier_image,exception); if (thread_status == MagickFalse) status=thread_status; } } if (status == MagickFalse) fourier_image=DestroyImageList(fourier_image); fftw_cleanup(); } } } #endif return(fourier_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I n v e r s e F o u r i e r T r a n s f o r m I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % InverseFourierTransformImage() implements the inverse discrete Fourier % transform (DFT) of the image either as a magnitude / phase or real / % imaginary image pair. % % The format of the InverseFourierTransformImage method is: % % Image *InverseFourierTransformImage(const Image *magnitude_image, % const Image *phase_image,const MagickBooleanType modulus, % ExceptionInfo *exception) % % A description of each parameter follows: % % o magnitude_image: the magnitude or real image. % % o phase_image: the phase or imaginary image. % % o modulus: if true, return transform as a magnitude / phase pair % otherwise a real / imaginary image pair. % % o exception: return any errors or warnings in this structure. 
% */ #if defined(MAGICKCORE_FFTW_DELEGATE) static MagickBooleanType InverseQuadrantSwap(const size_t width, const size_t height,const double *source,double *destination) { ssize_t x; ssize_t center, y; /* Swap quadrants. */ center=(ssize_t) (width/2L)+1L; for (y=1L; y < (ssize_t) height; y++) for (x=0L; x < (ssize_t) (width/2L+1L); x++) destination[(height-y)*center-x+width/2L]=source[y*width+x]; for (y=0L; y < (ssize_t) height; y++) destination[y*center]=source[y*width+width/2L]; for (x=0L; x < center; x++) destination[x]=source[center-x-1L]; return(RollFourier(center,height,0L,(ssize_t) height/-2L,destination)); } static MagickBooleanType InverseFourier(FourierInfo *fourier_info, const Image *magnitude_image,const Image *phase_image, fftw_complex *fourier_pixels,ExceptionInfo *exception) { CacheView *magnitude_view, *phase_view; double *inverse_pixels, *magnitude_pixels, *phase_pixels; MagickBooleanType status; MemoryInfo *inverse_info, *magnitude_info, *phase_info; const IndexPacket *indexes; const PixelPacket *p; ssize_t i, x; ssize_t y; /* Inverse Fourier - read image and break down into a double array. */ magnitude_info=AcquireVirtualMemory((size_t) fourier_info->width, fourier_info->height*sizeof(*magnitude_pixels)); phase_info=AcquireVirtualMemory((size_t) fourier_info->width, fourier_info->height*sizeof(*phase_pixels)); inverse_info=AcquireVirtualMemory((size_t) fourier_info->width, (fourier_info->height/2+1)*sizeof(*inverse_pixels)); if ((magnitude_info == (MemoryInfo *) NULL) || (phase_info == (MemoryInfo *) NULL) || (inverse_info == (MemoryInfo *) NULL)) { if (magnitude_info != (MemoryInfo *) NULL) magnitude_info=RelinquishVirtualMemory(magnitude_info); if (phase_info != (MemoryInfo *) NULL) phase_info=RelinquishVirtualMemory(phase_info); if (inverse_info != (MemoryInfo *) NULL) inverse_info=RelinquishVirtualMemory(inverse_info); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", magnitude_image->filename); return(MagickFalse); } magnitude_pixels=(double *) GetVirtualMemoryBlob(magnitude_info); phase_pixels=(double *) GetVirtualMemoryBlob(phase_info); inverse_pixels=(double *) GetVirtualMemoryBlob(inverse_info); i=0L; magnitude_view=AcquireVirtualCacheView(magnitude_image,exception); for (y=0L; y < (ssize_t) fourier_info->height; y++) { p=GetCacheViewVirtualPixels(magnitude_view,0L,y,fourier_info->width,1UL, exception); if (p == (const PixelPacket *) NULL) break; indexes=GetCacheViewAuthenticIndexQueue(magnitude_view); for (x=0L; x < (ssize_t) fourier_info->width; x++) { switch (fourier_info->channel) { case RedChannel: default: { magnitude_pixels[i]=QuantumScale*GetPixelRed(p); break; } case GreenChannel: { magnitude_pixels[i]=QuantumScale*GetPixelGreen(p); break; } case BlueChannel: { magnitude_pixels[i]=QuantumScale*GetPixelBlue(p); break; } case OpacityChannel: { magnitude_pixels[i]=QuantumScale*GetPixelOpacity(p); break; } case IndexChannel: { magnitude_pixels[i]=QuantumScale*GetPixelIndex(indexes+x); break; } case GrayChannels: { magnitude_pixels[i]=QuantumScale*GetPixelGray(p); break; } } i++; p++; } } magnitude_view=DestroyCacheView(magnitude_view); status=InverseQuadrantSwap(fourier_info->width,fourier_info->height, magnitude_pixels,inverse_pixels); (void) memcpy(magnitude_pixels,inverse_pixels,fourier_info->height* fourier_info->center*sizeof(*magnitude_pixels)); i=0L; phase_view=AcquireVirtualCacheView(phase_image,exception); for (y=0L; y < (ssize_t) fourier_info->height; y++) { 
p=GetCacheViewVirtualPixels(phase_view,0,y,fourier_info->width,1, exception); if (p == (const PixelPacket *) NULL) break; indexes=GetCacheViewAuthenticIndexQueue(phase_view); for (x=0L; x < (ssize_t) fourier_info->width; x++) { switch (fourier_info->channel) { case RedChannel: default: { phase_pixels[i]=QuantumScale*GetPixelRed(p); break; } case GreenChannel: { phase_pixels[i]=QuantumScale*GetPixelGreen(p); break; } case BlueChannel: { phase_pixels[i]=QuantumScale*GetPixelBlue(p); break; } case OpacityChannel: { phase_pixels[i]=QuantumScale*GetPixelOpacity(p); break; } case IndexChannel: { phase_pixels[i]=QuantumScale*GetPixelIndex(indexes+x); break; } case GrayChannels: { phase_pixels[i]=QuantumScale*GetPixelGray(p); break; } } i++; p++; } } if (fourier_info->modulus != MagickFalse) { i=0L; for (y=0L; y < (ssize_t) fourier_info->height; y++) for (x=0L; x < (ssize_t) fourier_info->width; x++) { phase_pixels[i]-=0.5; phase_pixels[i]*=(2.0*MagickPI); i++; } } phase_view=DestroyCacheView(phase_view); CorrectPhaseLHS(fourier_info->width,fourier_info->height,phase_pixels); if (status != MagickFalse) status=InverseQuadrantSwap(fourier_info->width,fourier_info->height, phase_pixels,inverse_pixels); (void) memcpy(phase_pixels,inverse_pixels,fourier_info->height* fourier_info->center*sizeof(*phase_pixels)); inverse_info=RelinquishVirtualMemory(inverse_info); /* Merge two sets. */ i=0L; if (fourier_info->modulus != MagickFalse) for (y=0L; y < (ssize_t) fourier_info->height; y++) for (x=0L; x < (ssize_t) fourier_info->center; x++) { #if defined(MAGICKCORE_HAVE_COMPLEX_H) fourier_pixels[i]=magnitude_pixels[i]*cos(phase_pixels[i])+I* magnitude_pixels[i]*sin(phase_pixels[i]); #else fourier_pixels[i][0]=magnitude_pixels[i]*cos(phase_pixels[i]); fourier_pixels[i][1]=magnitude_pixels[i]*sin(phase_pixels[i]); #endif i++; } else for (y=0L; y < (ssize_t) fourier_info->height; y++) for (x=0L; x < (ssize_t) fourier_info->center; x++) { #if defined(MAGICKCORE_HAVE_COMPLEX_H) fourier_pixels[i]=magnitude_pixels[i]+I*phase_pixels[i]; #else fourier_pixels[i][0]=magnitude_pixels[i]; fourier_pixels[i][1]=phase_pixels[i]; #endif i++; } magnitude_info=RelinquishVirtualMemory(magnitude_info); phase_info=RelinquishVirtualMemory(phase_info); return(status); } static MagickBooleanType InverseFourierTransform(FourierInfo *fourier_info, fftw_complex *fourier_pixels,Image *image,ExceptionInfo *exception) { CacheView *image_view; double *source_pixels; const char *value; fftw_plan fftw_c2r_plan; MemoryInfo *source_info; IndexPacket *indexes; PixelPacket *q; ssize_t i, x; ssize_t y; source_info=AcquireVirtualMemory((size_t) fourier_info->width, fourier_info->height*sizeof(*source_pixels)); if (source_info == (MemoryInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(MagickFalse); } source_pixels=(double *) GetVirtualMemoryBlob(source_info); value=GetImageArtifact(image,"fourier:normalize"); if (LocaleCompare(value,"inverse") == 0) { double gamma; /* Normalize inverse transform. 
*/ i=0L; gamma=PerceptibleReciprocal((double) fourier_info->width* fourier_info->height); for (y=0L; y < (ssize_t) fourier_info->height; y++) for (x=0L; x < (ssize_t) fourier_info->center; x++) { #if defined(MAGICKCORE_HAVE_COMPLEX_H) fourier_pixels[i]*=gamma; #else fourier_pixels[i][0]*=gamma; fourier_pixels[i][1]*=gamma; #endif i++; } } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_InverseFourierTransform) #endif fftw_c2r_plan=fftw_plan_dft_c2r_2d(fourier_info->width,fourier_info->height, fourier_pixels,source_pixels,FFTW_ESTIMATE); fftw_execute_dft_c2r(fftw_c2r_plan,fourier_pixels,source_pixels); fftw_destroy_plan(fftw_c2r_plan); i=0L; image_view=AcquireAuthenticCacheView(image,exception); for (y=0L; y < (ssize_t) fourier_info->height; y++) { if (y >= (ssize_t) image->rows) break; q=GetCacheViewAuthenticPixels(image_view,0L,y,fourier_info->width > image->columns ? image->columns : fourier_info->width,1UL,exception); if (q == (PixelPacket *) NULL) break; indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0L; x < (ssize_t) fourier_info->width; x++) { if (x < (ssize_t) image->columns) switch (fourier_info->channel) { case RedChannel: default: { SetPixelRed(q,ClampToQuantum(QuantumRange*source_pixels[i])); break; } case GreenChannel: { SetPixelGreen(q,ClampToQuantum(QuantumRange*source_pixels[i])); break; } case BlueChannel: { SetPixelBlue(q,ClampToQuantum(QuantumRange*source_pixels[i])); break; } case OpacityChannel: { SetPixelOpacity(q,ClampToQuantum(QuantumRange*source_pixels[i])); break; } case IndexChannel: { SetPixelIndex(indexes+x,ClampToQuantum(QuantumRange* source_pixels[i])); break; } case GrayChannels: { SetPixelGray(q,ClampToQuantum(QuantumRange*source_pixels[i])); break; } } i++; q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) break; } image_view=DestroyCacheView(image_view); source_info=RelinquishVirtualMemory(source_info); return(MagickTrue); } static MagickBooleanType InverseFourierTransformChannel( const Image *magnitude_image,const Image *phase_image, const ChannelType channel,const MagickBooleanType modulus, Image *fourier_image,ExceptionInfo *exception) { fftw_complex *inverse_pixels; FourierInfo fourier_info; MagickBooleanType status; MemoryInfo *inverse_info; fourier_info.width=magnitude_image->columns; fourier_info.height=magnitude_image->rows; if ((magnitude_image->columns != magnitude_image->rows) || ((magnitude_image->columns % 2) != 0) || ((magnitude_image->rows % 2) != 0)) { size_t extent=magnitude_image->columns < magnitude_image->rows ? magnitude_image->rows : magnitude_image->columns; fourier_info.width=(extent & 0x01) == 1 ? 
extent+1UL : extent; } fourier_info.height=fourier_info.width; fourier_info.center=(ssize_t) (fourier_info.width/2L)+1L; fourier_info.channel=channel; fourier_info.modulus=modulus; inverse_info=AcquireVirtualMemory((size_t) fourier_info.width, (fourier_info.height/2+1)*sizeof(*inverse_pixels)); if (inverse_info == (MemoryInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", magnitude_image->filename); return(MagickFalse); } inverse_pixels=(fftw_complex *) GetVirtualMemoryBlob(inverse_info); status=InverseFourier(&fourier_info,magnitude_image,phase_image, inverse_pixels,exception); if (status != MagickFalse) status=InverseFourierTransform(&fourier_info,inverse_pixels,fourier_image, exception); inverse_info=RelinquishVirtualMemory(inverse_info); return(status); } #endif MagickExport Image *InverseFourierTransformImage(const Image *magnitude_image, const Image *phase_image,const MagickBooleanType modulus, ExceptionInfo *exception) { Image *fourier_image; assert(magnitude_image != (Image *) NULL); assert(magnitude_image->signature == MagickCoreSignature); if (magnitude_image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", magnitude_image->filename); if (phase_image == (Image *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),ImageError, "ImageSequenceRequired","`%s'",magnitude_image->filename); return((Image *) NULL); } #if !defined(MAGICKCORE_FFTW_DELEGATE) fourier_image=(Image *) NULL; (void) modulus; (void) ThrowMagickException(exception,GetMagickModule(), MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn","`%s' (FFTW)", magnitude_image->filename); #else { fourier_image=CloneImage(magnitude_image,magnitude_image->columns, magnitude_image->rows,MagickTrue,exception); if (fourier_image != (Image *) NULL) { MagickBooleanType is_gray, status; status=MagickTrue; is_gray=IsGrayImage(magnitude_image,exception); if (is_gray != MagickFalse) is_gray=IsGrayImage(phase_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel sections #endif { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; if (is_gray != MagickFalse) thread_status=InverseFourierTransformChannel(magnitude_image, phase_image,GrayChannels,modulus,fourier_image,exception); else thread_status=InverseFourierTransformChannel(magnitude_image, phase_image,RedChannel,modulus,fourier_image,exception); if (thread_status == MagickFalse) status=thread_status; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; thread_status=MagickTrue; if (is_gray == MagickFalse) thread_status=InverseFourierTransformChannel(magnitude_image, phase_image,GreenChannel,modulus,fourier_image,exception); if (thread_status == MagickFalse) status=thread_status; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; thread_status=MagickTrue; if (is_gray == MagickFalse) thread_status=InverseFourierTransformChannel(magnitude_image, phase_image,BlueChannel,modulus,fourier_image,exception); if (thread_status == MagickFalse) status=thread_status; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; thread_status=MagickTrue; if (magnitude_image->matte != MagickFalse) thread_status=InverseFourierTransformChannel(magnitude_image, phase_image,OpacityChannel,modulus,fourier_image,exception); if (thread_status == MagickFalse) 
status=thread_status; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; thread_status=MagickTrue; if (magnitude_image->colorspace == CMYKColorspace) thread_status=InverseFourierTransformChannel(magnitude_image, phase_image,IndexChannel,modulus,fourier_image,exception); if (thread_status == MagickFalse) status=thread_status; } } if (status == MagickFalse) fourier_image=DestroyImage(fourier_image); } fftw_cleanup(); } #endif return(fourier_image); }
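/*
  Usage sketch (illustrative only, not part of MagickCore): how a caller
  might reconstruct an image from the magnitude/phase pair produced by a
  forward transform.  The wrapper name and variables are hypothetical; only
  the InverseFourierTransformImage() call matches the API defined above.
*/
#if 0
static Image *ReconstructFromSpectrum(const Image *magnitude,
  const Image *phase,ExceptionInfo *exception)
{
  /* modulus == MagickTrue: the pair is magnitude/phase rather than
     real/imaginary components */
  return(InverseFourierTransformImage(magnitude,phase,MagickTrue,exception));
}
#endif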
kmp_atomic.c
/*
 * kmp_atomic.c -- ATOMIC implementation routines
 */

//===----------------------------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.txt for details.
//
//===----------------------------------------------------------------------===//

#include "kmp_atomic.h"
#include "kmp.h"                  // TRUE, asm routines prototypes

typedef unsigned char uchar;
typedef unsigned short ushort;

/*!
@defgroup ATOMIC_OPS Atomic Operations
These functions are used for implementing the many different varieties of atomic operations.

The compiler is at liberty to inline atomic operations that are naturally supported
by the target architecture. For instance on IA-32 architecture an atomic like this can be inlined
@code
static int s = 0;
#pragma omp atomic
    s++;
@endcode
using the single instruction: `lock; incl s`

However the runtime does provide entrypoints for these operations to support compilers that choose
not to inline them. (For instance, `__kmpc_atomic_fixed4_add` could be used to perform the increment above.)

The names of the functions are encoded by using the data type name and the operation name, as in these tables.

Data Type  | Data type encoding
-----------|---------------
int8_t     | `fixed1`
uint8_t    | `fixed1u`
int16_t    | `fixed2`
uint16_t   | `fixed2u`
int32_t    | `fixed4`
uint32_t   | `fixed4u`
int64_t    | `fixed8`
uint64_t   | `fixed8u`
float      | `float4`
double     | `float8`
float 10 (8087 eighty bit float) | `float10`
complex<float> | `cmplx4`
complex<double> | `cmplx8`
complex<float10> | `cmplx10`
<br>

Operation | Operation encoding
----------|-------------------
+ | add
- | sub
\* | mul
/ | div
& | andb
<< | shl
\>\> | shr
\| | orb
^  | xor
&& | andl
\|\| | orl
maximum | max
minimum | min
.eqv. | eqv
.neqv. | neqv

<br>
For non-commutative operations, `_rev` can also be added for the reversed operation.
For the functions that capture the result, the suffix `_cpt` is added.

Update Functions
================
The general form of an atomic function that just performs an update (without a `capture`)
@code
void __kmpc_atomic_<datatype>_<operation>( ident_t *id_ref, int gtid, TYPE * lhs, TYPE rhs );
@endcode
@param id_ref a pointer to source location
@param gtid the global thread id
@param lhs a pointer to the left operand
@param rhs the right operand

`capture` functions
===================
The capture functions perform an atomic update and return a result, which is either the value
before the capture, or that after. They take an additional argument to determine which result is returned.
Their general form is therefore
@code
TYPE __kmpc_atomic_<datatype>_<operation>_cpt( ident_t *id_ref, int gtid, TYPE * lhs, TYPE rhs, int flag );
@endcode
@param id_ref a pointer to source location
@param gtid the global thread id
@param lhs a pointer to the left operand
@param rhs the right operand
@param flag one if the result is to be captured *after* the operation, zero if captured *before*.

The one set of exceptions to this is the `complex<float>` type where the value is not returned,
rather an extra argument pointer is passed.
They look like
@code
void __kmpc_atomic_cmplx4_<op>_cpt(  ident_t *id_ref, int gtid, kmp_cmplx32 * lhs, kmp_cmplx32 rhs, kmp_cmplx32 * out, int flag );
@endcode

Read and Write Operations
=========================
The OpenMP<sup>*</sup> standard now supports atomic operations that simply ensure that the
value is read or written atomically, with no modification
performed. In many cases on IA-32 architecture these operations can be inlined since
the architecture guarantees that no tearing occurs on aligned objects
accessed with a single memory operation of up to 64 bits in size.

The general form of the read operations is
@code
TYPE __kmpc_atomic_<type>_rd ( ident_t *id_ref, int gtid, TYPE * loc );
@endcode

For the write operations the form is
@code
void __kmpc_atomic_<type>_wr ( ident_t *id_ref, int gtid, TYPE * lhs, TYPE rhs );
@endcode

Full list of functions
======================
This leads to the generation of 376 atomic functions, as follows.

Functions for integers
----------------------
There are versions here for integers of size 1,2,4 and 8 bytes both signed and unsigned (where that matters).
@code
    __kmpc_atomic_fixed1_add
    __kmpc_atomic_fixed1_add_cpt
    __kmpc_atomic_fixed1_add_fp
    __kmpc_atomic_fixed1_andb
    __kmpc_atomic_fixed1_andb_cpt
    __kmpc_atomic_fixed1_andl
    __kmpc_atomic_fixed1_andl_cpt
    __kmpc_atomic_fixed1_div
    __kmpc_atomic_fixed1_div_cpt
    __kmpc_atomic_fixed1_div_cpt_rev
    __kmpc_atomic_fixed1_div_float8
    __kmpc_atomic_fixed1_div_fp
    __kmpc_atomic_fixed1_div_rev
    __kmpc_atomic_fixed1_eqv
    __kmpc_atomic_fixed1_eqv_cpt
    __kmpc_atomic_fixed1_max
    __kmpc_atomic_fixed1_max_cpt
    __kmpc_atomic_fixed1_min
    __kmpc_atomic_fixed1_min_cpt
    __kmpc_atomic_fixed1_mul
    __kmpc_atomic_fixed1_mul_cpt
    __kmpc_atomic_fixed1_mul_float8
    __kmpc_atomic_fixed1_mul_fp
    __kmpc_atomic_fixed1_neqv
    __kmpc_atomic_fixed1_neqv_cpt
    __kmpc_atomic_fixed1_orb
    __kmpc_atomic_fixed1_orb_cpt
    __kmpc_atomic_fixed1_orl
    __kmpc_atomic_fixed1_orl_cpt
    __kmpc_atomic_fixed1_rd
    __kmpc_atomic_fixed1_shl
    __kmpc_atomic_fixed1_shl_cpt
    __kmpc_atomic_fixed1_shl_cpt_rev
    __kmpc_atomic_fixed1_shl_rev
    __kmpc_atomic_fixed1_shr
    __kmpc_atomic_fixed1_shr_cpt
    __kmpc_atomic_fixed1_shr_cpt_rev
    __kmpc_atomic_fixed1_shr_rev
    __kmpc_atomic_fixed1_sub
    __kmpc_atomic_fixed1_sub_cpt
    __kmpc_atomic_fixed1_sub_cpt_rev
    __kmpc_atomic_fixed1_sub_fp
    __kmpc_atomic_fixed1_sub_rev
    __kmpc_atomic_fixed1_swp
    __kmpc_atomic_fixed1_wr
    __kmpc_atomic_fixed1_xor
    __kmpc_atomic_fixed1_xor_cpt
    __kmpc_atomic_fixed1u_div
    __kmpc_atomic_fixed1u_div_cpt
    __kmpc_atomic_fixed1u_div_cpt_rev
    __kmpc_atomic_fixed1u_div_fp
    __kmpc_atomic_fixed1u_div_rev
    __kmpc_atomic_fixed1u_shr
    __kmpc_atomic_fixed1u_shr_cpt
    __kmpc_atomic_fixed1u_shr_cpt_rev
    __kmpc_atomic_fixed1u_shr_rev
    __kmpc_atomic_fixed2_add
    __kmpc_atomic_fixed2_add_cpt
    __kmpc_atomic_fixed2_add_fp
    __kmpc_atomic_fixed2_andb
    __kmpc_atomic_fixed2_andb_cpt
    __kmpc_atomic_fixed2_andl
    __kmpc_atomic_fixed2_andl_cpt
    __kmpc_atomic_fixed2_div
    __kmpc_atomic_fixed2_div_cpt
    __kmpc_atomic_fixed2_div_cpt_rev
    __kmpc_atomic_fixed2_div_float8
    __kmpc_atomic_fixed2_div_fp
    __kmpc_atomic_fixed2_div_rev
    __kmpc_atomic_fixed2_eqv
    __kmpc_atomic_fixed2_eqv_cpt
    __kmpc_atomic_fixed2_max
    __kmpc_atomic_fixed2_max_cpt
    __kmpc_atomic_fixed2_min
    __kmpc_atomic_fixed2_min_cpt
    __kmpc_atomic_fixed2_mul
    __kmpc_atomic_fixed2_mul_cpt
    __kmpc_atomic_fixed2_mul_float8
    __kmpc_atomic_fixed2_mul_fp
    __kmpc_atomic_fixed2_neqv
    __kmpc_atomic_fixed2_neqv_cpt
    __kmpc_atomic_fixed2_orb
    __kmpc_atomic_fixed2_orb_cpt
    __kmpc_atomic_fixed2_orl
    __kmpc_atomic_fixed2_orl_cpt
    __kmpc_atomic_fixed2_rd
__kmpc_atomic_fixed2_shl __kmpc_atomic_fixed2_shl_cpt __kmpc_atomic_fixed2_shl_cpt_rev __kmpc_atomic_fixed2_shl_rev __kmpc_atomic_fixed2_shr __kmpc_atomic_fixed2_shr_cpt __kmpc_atomic_fixed2_shr_cpt_rev __kmpc_atomic_fixed2_shr_rev __kmpc_atomic_fixed2_sub __kmpc_atomic_fixed2_sub_cpt __kmpc_atomic_fixed2_sub_cpt_rev __kmpc_atomic_fixed2_sub_fp __kmpc_atomic_fixed2_sub_rev __kmpc_atomic_fixed2_swp __kmpc_atomic_fixed2_wr __kmpc_atomic_fixed2_xor __kmpc_atomic_fixed2_xor_cpt __kmpc_atomic_fixed2u_div __kmpc_atomic_fixed2u_div_cpt __kmpc_atomic_fixed2u_div_cpt_rev __kmpc_atomic_fixed2u_div_fp __kmpc_atomic_fixed2u_div_rev __kmpc_atomic_fixed2u_shr __kmpc_atomic_fixed2u_shr_cpt __kmpc_atomic_fixed2u_shr_cpt_rev __kmpc_atomic_fixed2u_shr_rev __kmpc_atomic_fixed4_add __kmpc_atomic_fixed4_add_cpt __kmpc_atomic_fixed4_add_fp __kmpc_atomic_fixed4_andb __kmpc_atomic_fixed4_andb_cpt __kmpc_atomic_fixed4_andl __kmpc_atomic_fixed4_andl_cpt __kmpc_atomic_fixed4_div __kmpc_atomic_fixed4_div_cpt __kmpc_atomic_fixed4_div_cpt_rev __kmpc_atomic_fixed4_div_float8 __kmpc_atomic_fixed4_div_fp __kmpc_atomic_fixed4_div_rev __kmpc_atomic_fixed4_eqv __kmpc_atomic_fixed4_eqv_cpt __kmpc_atomic_fixed4_max __kmpc_atomic_fixed4_max_cpt __kmpc_atomic_fixed4_min __kmpc_atomic_fixed4_min_cpt __kmpc_atomic_fixed4_mul __kmpc_atomic_fixed4_mul_cpt __kmpc_atomic_fixed4_mul_float8 __kmpc_atomic_fixed4_mul_fp __kmpc_atomic_fixed4_neqv __kmpc_atomic_fixed4_neqv_cpt __kmpc_atomic_fixed4_orb __kmpc_atomic_fixed4_orb_cpt __kmpc_atomic_fixed4_orl __kmpc_atomic_fixed4_orl_cpt __kmpc_atomic_fixed4_rd __kmpc_atomic_fixed4_shl __kmpc_atomic_fixed4_shl_cpt __kmpc_atomic_fixed4_shl_cpt_rev __kmpc_atomic_fixed4_shl_rev __kmpc_atomic_fixed4_shr __kmpc_atomic_fixed4_shr_cpt __kmpc_atomic_fixed4_shr_cpt_rev __kmpc_atomic_fixed4_shr_rev __kmpc_atomic_fixed4_sub __kmpc_atomic_fixed4_sub_cpt __kmpc_atomic_fixed4_sub_cpt_rev __kmpc_atomic_fixed4_sub_fp __kmpc_atomic_fixed4_sub_rev __kmpc_atomic_fixed4_swp __kmpc_atomic_fixed4_wr __kmpc_atomic_fixed4_xor __kmpc_atomic_fixed4_xor_cpt __kmpc_atomic_fixed4u_div __kmpc_atomic_fixed4u_div_cpt __kmpc_atomic_fixed4u_div_cpt_rev __kmpc_atomic_fixed4u_div_fp __kmpc_atomic_fixed4u_div_rev __kmpc_atomic_fixed4u_shr __kmpc_atomic_fixed4u_shr_cpt __kmpc_atomic_fixed4u_shr_cpt_rev __kmpc_atomic_fixed4u_shr_rev __kmpc_atomic_fixed8_add __kmpc_atomic_fixed8_add_cpt __kmpc_atomic_fixed8_add_fp __kmpc_atomic_fixed8_andb __kmpc_atomic_fixed8_andb_cpt __kmpc_atomic_fixed8_andl __kmpc_atomic_fixed8_andl_cpt __kmpc_atomic_fixed8_div __kmpc_atomic_fixed8_div_cpt __kmpc_atomic_fixed8_div_cpt_rev __kmpc_atomic_fixed8_div_float8 __kmpc_atomic_fixed8_div_fp __kmpc_atomic_fixed8_div_rev __kmpc_atomic_fixed8_eqv __kmpc_atomic_fixed8_eqv_cpt __kmpc_atomic_fixed8_max __kmpc_atomic_fixed8_max_cpt __kmpc_atomic_fixed8_min __kmpc_atomic_fixed8_min_cpt __kmpc_atomic_fixed8_mul __kmpc_atomic_fixed8_mul_cpt __kmpc_atomic_fixed8_mul_float8 __kmpc_atomic_fixed8_mul_fp __kmpc_atomic_fixed8_neqv __kmpc_atomic_fixed8_neqv_cpt __kmpc_atomic_fixed8_orb __kmpc_atomic_fixed8_orb_cpt __kmpc_atomic_fixed8_orl __kmpc_atomic_fixed8_orl_cpt __kmpc_atomic_fixed8_rd __kmpc_atomic_fixed8_shl __kmpc_atomic_fixed8_shl_cpt __kmpc_atomic_fixed8_shl_cpt_rev __kmpc_atomic_fixed8_shl_rev __kmpc_atomic_fixed8_shr __kmpc_atomic_fixed8_shr_cpt __kmpc_atomic_fixed8_shr_cpt_rev __kmpc_atomic_fixed8_shr_rev __kmpc_atomic_fixed8_sub __kmpc_atomic_fixed8_sub_cpt __kmpc_atomic_fixed8_sub_cpt_rev __kmpc_atomic_fixed8_sub_fp __kmpc_atomic_fixed8_sub_rev 
    __kmpc_atomic_fixed8_swp
    __kmpc_atomic_fixed8_wr
    __kmpc_atomic_fixed8_xor
    __kmpc_atomic_fixed8_xor_cpt
    __kmpc_atomic_fixed8u_div
    __kmpc_atomic_fixed8u_div_cpt
    __kmpc_atomic_fixed8u_div_cpt_rev
    __kmpc_atomic_fixed8u_div_fp
    __kmpc_atomic_fixed8u_div_rev
    __kmpc_atomic_fixed8u_shr
    __kmpc_atomic_fixed8u_shr_cpt
    __kmpc_atomic_fixed8u_shr_cpt_rev
    __kmpc_atomic_fixed8u_shr_rev
@endcode

Functions for floating point
----------------------------
There are versions here for floating point numbers of size 4, 8, 10 and 16 bytes.
(Ten byte floats are used by X87, but are now rare).
@code
    __kmpc_atomic_float4_add
    __kmpc_atomic_float4_add_cpt
    __kmpc_atomic_float4_add_float8
    __kmpc_atomic_float4_add_fp
    __kmpc_atomic_float4_div
    __kmpc_atomic_float4_div_cpt
    __kmpc_atomic_float4_div_cpt_rev
    __kmpc_atomic_float4_div_float8
    __kmpc_atomic_float4_div_fp
    __kmpc_atomic_float4_div_rev
    __kmpc_atomic_float4_max
    __kmpc_atomic_float4_max_cpt
    __kmpc_atomic_float4_min
    __kmpc_atomic_float4_min_cpt
    __kmpc_atomic_float4_mul
    __kmpc_atomic_float4_mul_cpt
    __kmpc_atomic_float4_mul_float8
    __kmpc_atomic_float4_mul_fp
    __kmpc_atomic_float4_rd
    __kmpc_atomic_float4_sub
    __kmpc_atomic_float4_sub_cpt
    __kmpc_atomic_float4_sub_cpt_rev
    __kmpc_atomic_float4_sub_float8
    __kmpc_atomic_float4_sub_fp
    __kmpc_atomic_float4_sub_rev
    __kmpc_atomic_float4_swp
    __kmpc_atomic_float4_wr
    __kmpc_atomic_float8_add
    __kmpc_atomic_float8_add_cpt
    __kmpc_atomic_float8_add_fp
    __kmpc_atomic_float8_div
    __kmpc_atomic_float8_div_cpt
    __kmpc_atomic_float8_div_cpt_rev
    __kmpc_atomic_float8_div_fp
    __kmpc_atomic_float8_div_rev
    __kmpc_atomic_float8_max
    __kmpc_atomic_float8_max_cpt
    __kmpc_atomic_float8_min
    __kmpc_atomic_float8_min_cpt
    __kmpc_atomic_float8_mul
    __kmpc_atomic_float8_mul_cpt
    __kmpc_atomic_float8_mul_fp
    __kmpc_atomic_float8_rd
    __kmpc_atomic_float8_sub
    __kmpc_atomic_float8_sub_cpt
    __kmpc_atomic_float8_sub_cpt_rev
    __kmpc_atomic_float8_sub_fp
    __kmpc_atomic_float8_sub_rev
    __kmpc_atomic_float8_swp
    __kmpc_atomic_float8_wr
    __kmpc_atomic_float10_add
    __kmpc_atomic_float10_add_cpt
    __kmpc_atomic_float10_add_fp
    __kmpc_atomic_float10_div
    __kmpc_atomic_float10_div_cpt
    __kmpc_atomic_float10_div_cpt_rev
    __kmpc_atomic_float10_div_fp
    __kmpc_atomic_float10_div_rev
    __kmpc_atomic_float10_mul
    __kmpc_atomic_float10_mul_cpt
    __kmpc_atomic_float10_mul_fp
    __kmpc_atomic_float10_rd
    __kmpc_atomic_float10_sub
    __kmpc_atomic_float10_sub_cpt
    __kmpc_atomic_float10_sub_cpt_rev
    __kmpc_atomic_float10_sub_fp
    __kmpc_atomic_float10_sub_rev
    __kmpc_atomic_float10_swp
    __kmpc_atomic_float10_wr
    __kmpc_atomic_float16_add
    __kmpc_atomic_float16_add_cpt
    __kmpc_atomic_float16_div
    __kmpc_atomic_float16_div_cpt
    __kmpc_atomic_float16_div_cpt_rev
    __kmpc_atomic_float16_div_rev
    __kmpc_atomic_float16_max
    __kmpc_atomic_float16_max_cpt
    __kmpc_atomic_float16_min
    __kmpc_atomic_float16_min_cpt
    __kmpc_atomic_float16_mul
    __kmpc_atomic_float16_mul_cpt
    __kmpc_atomic_float16_rd
    __kmpc_atomic_float16_sub
    __kmpc_atomic_float16_sub_cpt
    __kmpc_atomic_float16_sub_cpt_rev
    __kmpc_atomic_float16_sub_rev
    __kmpc_atomic_float16_swp
    __kmpc_atomic_float16_wr
@endcode

Functions for Complex types
---------------------------
Functions for complex types whose component floating point variables are of size 4,8,10 or 16 bytes.
The names here are based on the size of the component float, *not* the size of the complex type. So
`__kmpc_atomic_cmplx8_add` is an operation on a `complex<double>` or `complex(kind=8)`, *not* `complex<float>`.
@code __kmpc_atomic_cmplx4_add __kmpc_atomic_cmplx4_add_cmplx8 __kmpc_atomic_cmplx4_add_cpt __kmpc_atomic_cmplx4_div __kmpc_atomic_cmplx4_div_cmplx8 __kmpc_atomic_cmplx4_div_cpt __kmpc_atomic_cmplx4_div_cpt_rev __kmpc_atomic_cmplx4_div_rev __kmpc_atomic_cmplx4_mul __kmpc_atomic_cmplx4_mul_cmplx8 __kmpc_atomic_cmplx4_mul_cpt __kmpc_atomic_cmplx4_rd __kmpc_atomic_cmplx4_sub __kmpc_atomic_cmplx4_sub_cmplx8 __kmpc_atomic_cmplx4_sub_cpt __kmpc_atomic_cmplx4_sub_cpt_rev __kmpc_atomic_cmplx4_sub_rev __kmpc_atomic_cmplx4_swp __kmpc_atomic_cmplx4_wr __kmpc_atomic_cmplx8_add __kmpc_atomic_cmplx8_add_cpt __kmpc_atomic_cmplx8_div __kmpc_atomic_cmplx8_div_cpt __kmpc_atomic_cmplx8_div_cpt_rev __kmpc_atomic_cmplx8_div_rev __kmpc_atomic_cmplx8_mul __kmpc_atomic_cmplx8_mul_cpt __kmpc_atomic_cmplx8_rd __kmpc_atomic_cmplx8_sub __kmpc_atomic_cmplx8_sub_cpt __kmpc_atomic_cmplx8_sub_cpt_rev __kmpc_atomic_cmplx8_sub_rev __kmpc_atomic_cmplx8_swp __kmpc_atomic_cmplx8_wr __kmpc_atomic_cmplx10_add __kmpc_atomic_cmplx10_add_cpt __kmpc_atomic_cmplx10_div __kmpc_atomic_cmplx10_div_cpt __kmpc_atomic_cmplx10_div_cpt_rev __kmpc_atomic_cmplx10_div_rev __kmpc_atomic_cmplx10_mul __kmpc_atomic_cmplx10_mul_cpt __kmpc_atomic_cmplx10_rd __kmpc_atomic_cmplx10_sub __kmpc_atomic_cmplx10_sub_cpt __kmpc_atomic_cmplx10_sub_cpt_rev __kmpc_atomic_cmplx10_sub_rev __kmpc_atomic_cmplx10_swp __kmpc_atomic_cmplx10_wr __kmpc_atomic_cmplx16_add __kmpc_atomic_cmplx16_add_cpt __kmpc_atomic_cmplx16_div __kmpc_atomic_cmplx16_div_cpt __kmpc_atomic_cmplx16_div_cpt_rev __kmpc_atomic_cmplx16_div_rev __kmpc_atomic_cmplx16_mul __kmpc_atomic_cmplx16_mul_cpt __kmpc_atomic_cmplx16_rd __kmpc_atomic_cmplx16_sub __kmpc_atomic_cmplx16_sub_cpt __kmpc_atomic_cmplx16_sub_cpt_rev __kmpc_atomic_cmplx16_swp __kmpc_atomic_cmplx16_wr @endcode */ /*! 
@ingroup ATOMIC_OPS
@{
*/

/*
 * Global vars
 */

#ifndef KMP_GOMP_COMPAT
int __kmp_atomic_mode = 1;      // Intel perf
#else
int __kmp_atomic_mode = 2;      // GOMP compatibility
#endif /* KMP_GOMP_COMPAT */

KMP_ALIGN(128)

kmp_atomic_lock_t __kmp_atomic_lock;     /* Control access to all user coded atomics in Gnu compat mode          */
kmp_atomic_lock_t __kmp_atomic_lock_1i;  /* Control access to all user coded atomics for 1-byte fixed data types */
kmp_atomic_lock_t __kmp_atomic_lock_2i;  /* Control access to all user coded atomics for 2-byte fixed data types */
kmp_atomic_lock_t __kmp_atomic_lock_4i;  /* Control access to all user coded atomics for 4-byte fixed data types */
kmp_atomic_lock_t __kmp_atomic_lock_4r;  /* Control access to all user coded atomics for kmp_real32 data type    */
kmp_atomic_lock_t __kmp_atomic_lock_8i;  /* Control access to all user coded atomics for 8-byte fixed data types */
kmp_atomic_lock_t __kmp_atomic_lock_8r;  /* Control access to all user coded atomics for kmp_real64 data type    */
kmp_atomic_lock_t __kmp_atomic_lock_8c;  /* Control access to all user coded atomics for 8-byte complex data type */
kmp_atomic_lock_t __kmp_atomic_lock_10r; /* Control access to all user coded atomics for long double data type   */
kmp_atomic_lock_t __kmp_atomic_lock_16r; /* Control access to all user coded atomics for _Quad data type         */
kmp_atomic_lock_t __kmp_atomic_lock_16c; /* Control access to all user coded atomics for double complex data type */
kmp_atomic_lock_t __kmp_atomic_lock_20c; /* Control access to all user coded atomics for long double complex type */
kmp_atomic_lock_t __kmp_atomic_lock_32c; /* Control access to all user coded atomics for _Quad complex data type */

/*
  2007-03-02:
  Without "volatile" specifier in OP_CMPXCHG and MIN_MAX_CMPXCHG we have a
  bug on *_32 and *_32e. This is just a temporary workaround for the problem.
  It seems the right solution is writing OP_CMPXCHG and MIN_MAX_CMPXCHG
  routines in assembler language.
*/

#define KMP_ATOMIC_VOLATILE volatile

#if ( KMP_ARCH_X86 ) && KMP_HAVE_QUAD

    static inline void operator +=( Quad_a4_t & lhs, Quad_a4_t & rhs ) { lhs.q += rhs.q; };
    static inline void operator -=( Quad_a4_t & lhs, Quad_a4_t & rhs ) { lhs.q -= rhs.q; };
    static inline void operator *=( Quad_a4_t & lhs, Quad_a4_t & rhs ) { lhs.q *= rhs.q; };
    static inline void operator /=( Quad_a4_t & lhs, Quad_a4_t & rhs ) { lhs.q /= rhs.q; };
    static inline bool operator < ( Quad_a4_t & lhs, Quad_a4_t & rhs ) { return lhs.q < rhs.q; }
    static inline bool operator > ( Quad_a4_t & lhs, Quad_a4_t & rhs ) { return lhs.q > rhs.q; }

    static inline void operator +=( Quad_a16_t & lhs, Quad_a16_t & rhs ) { lhs.q += rhs.q; };
    static inline void operator -=( Quad_a16_t & lhs, Quad_a16_t & rhs ) { lhs.q -= rhs.q; };
    static inline void operator *=( Quad_a16_t & lhs, Quad_a16_t & rhs ) { lhs.q *= rhs.q; };
    static inline void operator /=( Quad_a16_t & lhs, Quad_a16_t & rhs ) { lhs.q /= rhs.q; };
    static inline bool operator < ( Quad_a16_t & lhs, Quad_a16_t & rhs ) { return lhs.q < rhs.q; }
    static inline bool operator > ( Quad_a16_t & lhs, Quad_a16_t & rhs ) { return lhs.q > rhs.q; }

    static inline void operator +=( kmp_cmplx128_a4_t & lhs, kmp_cmplx128_a4_t & rhs ) { lhs.q += rhs.q; };
    static inline void operator -=( kmp_cmplx128_a4_t & lhs, kmp_cmplx128_a4_t & rhs ) { lhs.q -= rhs.q; };
    static inline void operator *=( kmp_cmplx128_a4_t & lhs, kmp_cmplx128_a4_t & rhs ) { lhs.q *= rhs.q; };
    static inline void operator /=( kmp_cmplx128_a4_t & lhs, kmp_cmplx128_a4_t & rhs ) { lhs.q /= rhs.q; };

    static inline void operator +=( kmp_cmplx128_a16_t & lhs, kmp_cmplx128_a16_t & rhs ) { lhs.q += rhs.q; };
    static inline void operator -=( kmp_cmplx128_a16_t & lhs, kmp_cmplx128_a16_t & rhs ) { lhs.q -= rhs.q; };
    static inline void operator *=( kmp_cmplx128_a16_t & lhs, kmp_cmplx128_a16_t & rhs ) { lhs.q *= rhs.q; };
    static inline void operator /=( kmp_cmplx128_a16_t & lhs, kmp_cmplx128_a16_t & rhs ) { lhs.q /= rhs.q; };

#endif

/* ------------------------------------------------------------------------ */
/* ATOMIC implementation routines                                           */
/* one routine for each operation and operand type                          */
/* ------------------------------------------------------------------------ */

// All routine declarations look like
//     void __kmpc_atomic_RTYPE_OP( ident_t*, int, TYPE *lhs, TYPE rhs );
// ------------------------------------------------------------------------

#define KMP_CHECK_GTID                                                    \
    if ( gtid == KMP_GTID_UNKNOWN ) {                                     \
        gtid = __kmp_entry_gtid();                                        \
    } // check and get gtid when needed

// Beginning of a definition (provides name, parameters, debug trace)
//     TYPE_ID - operands type and size (fixed*, fixed*u for signed, unsigned fixed)
//     OP_ID   - operation identifier (add, sub, mul, ...)
//     TYPE    - operands' type
#define ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE, RET_TYPE)                                            \
RET_TYPE __kmpc_atomic_##TYPE_ID##_##OP_ID( ident_t *id_ref, int gtid, TYPE * lhs, TYPE rhs ) \
{                                                                                             \
    KMP_DEBUG_ASSERT( __kmp_init_serial );                                                    \
    KA_TRACE(100,("__kmpc_atomic_" #TYPE_ID "_" #OP_ID ": T#%d\n", gtid ));

// ------------------------------------------------------------------------
// Lock variables used for critical sections for various size operands
#define ATOMIC_LOCK0   __kmp_atomic_lock       // all types, for Gnu compat
#define ATOMIC_LOCK1i  __kmp_atomic_lock_1i    // char
#define ATOMIC_LOCK2i  __kmp_atomic_lock_2i    // short
#define ATOMIC_LOCK4i  __kmp_atomic_lock_4i    // long int
#define ATOMIC_LOCK4r  __kmp_atomic_lock_4r    // float
#define ATOMIC_LOCK8i  __kmp_atomic_lock_8i    // long long int
#define ATOMIC_LOCK8r  __kmp_atomic_lock_8r    // double
#define ATOMIC_LOCK8c  __kmp_atomic_lock_8c    // float complex
#define ATOMIC_LOCK10r __kmp_atomic_lock_10r   // long double
#define ATOMIC_LOCK16r __kmp_atomic_lock_16r   // _Quad
#define ATOMIC_LOCK16c __kmp_atomic_lock_16c   // double complex
#define ATOMIC_LOCK20c __kmp_atomic_lock_20c   // long double complex
#define ATOMIC_LOCK32c __kmp_atomic_lock_32c   // _Quad complex

// ------------------------------------------------------------------------
// Operation on *lhs, rhs bound by critical section
//     OP     - operator (it's supposed to contain an assignment)
//     LCK_ID - lock identifier
// Note: don't check gtid as it should always be valid
// 1, 2-byte - expect valid parameter, other - check before this macro
#define OP_CRITICAL(OP,LCK_ID)                                            \
    __kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid );             \
                                                                          \
    (*lhs) OP (rhs);                                                      \
                                                                          \
    __kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid );

// ------------------------------------------------------------------------
// For GNU compatibility, we may need to use a critical section,
// even though it is not required by the ISA.
//
// On IA-32 architecture, all atomic operations except for fixed 4 byte add,
// sub, and bitwise logical ops, and 1 & 2 byte logical ops use a common
// critical section.  On Intel(R) 64, all atomic operations are done with fetch
// and add or compare and exchange.  Therefore, the FLAG parameter to this
// macro is either KMP_ARCH_X86 or 0 (or 1, for Intel-specific extensions which
// require a critical section, where we predict that they will be implemented
// in the Gnu codegen by calling GOMP_atomic_start() / GOMP_atomic_end()).
//
// When the OP_GOMP_CRITICAL macro is used in a *CRITICAL* macro construct,
// the FLAG parameter should always be 1.  If we know that we will be using
// a critical section, then we want to make certain that we use the generic
// lock __kmp_atomic_lock to protect the atomic update, and not one of the
// locks that are specialized based upon the size or type of the data.
//
// If FLAG is 0, then we are relying on dead code elimination by the build
// compiler to get rid of the useless block of code, and save a needless
// branch at runtime.
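//
// Illustrative expansion (a sketch for exposition, not generated code): for
// an entry point instantiated with GOMP_FLAG == 1 and OP == +=, the
// OP_GOMP_CRITICAL test defined below effectively prepends
//
//     if ( __kmp_atomic_mode == 2 ) {    // GOMP compatibility mode
//         // gtid is looked up via KMP_CHECK_GTID if the caller
//         // passed KMP_GTID_UNKNOWN
//         __kmp_acquire_atomic_lock( & __kmp_atomic_lock, gtid );
//         (*lhs) += (rhs);
//         __kmp_release_atomic_lock( & __kmp_atomic_lock, gtid );
//         return;
//     }
//
// to the lock-free path, serializing on the generic lock 0; with
// GOMP_FLAG == 0 the test is compile-time false and the block disappears.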
// #ifdef KMP_GOMP_COMPAT # define OP_GOMP_CRITICAL(OP,FLAG) \ if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \ KMP_CHECK_GTID; \ OP_CRITICAL( OP, 0 ); \ return; \ } # else # define OP_GOMP_CRITICAL(OP,FLAG) #endif /* KMP_GOMP_COMPAT */ #if KMP_MIC # define KMP_DO_PAUSE _mm_delay_32( 1 ) #else # define KMP_DO_PAUSE KMP_CPU_PAUSE() #endif /* KMP_MIC */ // ------------------------------------------------------------------------ // Operation on *lhs, rhs using "compare_and_store" routine // TYPE - operands' type // BITS - size in bits, used to distinguish low level calls // OP - operator #define OP_CMPXCHG(TYPE,BITS,OP) \ { \ TYPE old_value, new_value; \ old_value = *(TYPE volatile *)lhs; \ new_value = old_value OP rhs; \ while ( ! KMP_COMPARE_AND_STORE_ACQ##BITS( (kmp_int##BITS *) lhs, \ *VOLATILE_CAST(kmp_int##BITS *) &old_value, \ *VOLATILE_CAST(kmp_int##BITS *) &new_value ) ) \ { \ KMP_DO_PAUSE; \ \ old_value = *(TYPE volatile *)lhs; \ new_value = old_value OP rhs; \ } \ } #if USE_CMPXCHG_FIX // 2007-06-25: // workaround for C78287 (complex(kind=4) data type) // lin_32, lin_32e, win_32 and win_32e are affected (I verified the asm) // Compiler ignores the volatile qualifier of the temp_val in the OP_CMPXCHG macro. // This is a problem of the compiler. // Related tracker is C76005, targeted to 11.0. // I verified the asm of the workaround. #define OP_CMPXCHG_WORKAROUND(TYPE,BITS,OP) \ { \ struct _sss { \ TYPE cmp; \ kmp_int##BITS *vvv; \ }; \ struct _sss old_value, new_value; \ old_value.vvv = ( kmp_int##BITS * )&old_value.cmp; \ new_value.vvv = ( kmp_int##BITS * )&new_value.cmp; \ *old_value.vvv = * ( volatile kmp_int##BITS * ) lhs; \ new_value.cmp = old_value.cmp OP rhs; \ while ( ! KMP_COMPARE_AND_STORE_ACQ##BITS( (kmp_int##BITS *) lhs, \ *VOLATILE_CAST(kmp_int##BITS *) old_value.vvv, \ *VOLATILE_CAST(kmp_int##BITS *) new_value.vvv ) ) \ { \ KMP_DO_PAUSE; \ \ *old_value.vvv = * ( volatile kmp_int##BITS * ) lhs; \ new_value.cmp = old_value.cmp OP rhs; \ } \ } // end of the first part of the workaround for C78287 #endif // USE_CMPXCHG_FIX #if KMP_ARCH_X86 || KMP_ARCH_X86_64 // ------------------------------------------------------------------------ // X86 or X86_64: no alignment problems ==================================== #define ATOMIC_FIXED_ADD(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \ ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \ /* OP used as a sign for subtraction: (lhs-rhs) --> (lhs+-rhs) */ \ KMP_TEST_THEN_ADD##BITS( lhs, OP rhs ); \ } // ------------------------------------------------------------------------- #define ATOMIC_CMPXCHG(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \ ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \ OP_CMPXCHG(TYPE,BITS,OP) \ } #if USE_CMPXCHG_FIX // ------------------------------------------------------------------------- // workaround for C78287 (complex(kind=4) data type) #define ATOMIC_CMPXCHG_WORKAROUND(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \ ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \ OP_CMPXCHG_WORKAROUND(TYPE,BITS,OP) \ } // end of the second part of the workaround for C78287 #endif #else // ------------------------------------------------------------------------- // Code for other architectures that don't handle unaligned accesses. #define ATOMIC_FIXED_ADD(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \ ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \ if ( ! 
( (kmp_uintptr_t) lhs & 0x##MASK) ) { \ /* OP used as a sign for subtraction: (lhs-rhs) --> (lhs+-rhs) */ \ KMP_TEST_THEN_ADD##BITS( lhs, OP rhs ); \ } else { \ KMP_CHECK_GTID; \ OP_CRITICAL(OP##=,LCK_ID) /* unaligned address - use critical */ \ } \ } // ------------------------------------------------------------------------- #define ATOMIC_CMPXCHG(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \ ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \ if ( ! ( (kmp_uintptr_t) lhs & 0x##MASK) ) { \ OP_CMPXCHG(TYPE,BITS,OP) /* aligned address */ \ } else { \ KMP_CHECK_GTID; \ OP_CRITICAL(OP##=,LCK_ID) /* unaligned address - use critical */ \ } \ } #if USE_CMPXCHG_FIX // ------------------------------------------------------------------------- // workaround for C78287 (complex(kind=4) data type) #define ATOMIC_CMPXCHG_WORKAROUND(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \ ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \ if ( ! ( (kmp_uintptr_t) lhs & 0x##MASK) ) { \ OP_CMPXCHG(TYPE,BITS,OP) /* aligned address */ \ } else { \ KMP_CHECK_GTID; \ OP_CRITICAL(OP##=,LCK_ID) /* unaligned address - use critical */ \ } \ } // end of the second part of the workaround for C78287 #endif // USE_CMPXCHG_FIX #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */ // Routines for ATOMIC 4-byte operands addition and subtraction ATOMIC_FIXED_ADD( fixed4, add, kmp_int32, 32, +, 4i, 3, 0 ) // __kmpc_atomic_fixed4_add ATOMIC_FIXED_ADD( fixed4, sub, kmp_int32, 32, -, 4i, 3, 0 ) // __kmpc_atomic_fixed4_sub ATOMIC_CMPXCHG( float4, add, kmp_real32, 32, +, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_add ATOMIC_CMPXCHG( float4, sub, kmp_real32, 32, -, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_sub // Routines for ATOMIC 8-byte operands addition and subtraction ATOMIC_FIXED_ADD( fixed8, add, kmp_int64, 64, +, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_add ATOMIC_FIXED_ADD( fixed8, sub, kmp_int64, 64, -, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_sub ATOMIC_CMPXCHG( float8, add, kmp_real64, 64, +, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_add ATOMIC_CMPXCHG( float8, sub, kmp_real64, 64, -, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_sub // ------------------------------------------------------------------------ // Entries definition for integer operands // TYPE_ID - operands type and size (fixed4, float4) // OP_ID - operation identifier (add, sub, mul, ...) 
// TYPE - operand type // BITS - size in bits, used to distinguish low level calls // OP - operator (used in critical section) // LCK_ID - lock identifier, used to possibly distinguish lock variable // MASK - used for alignment check // TYPE_ID,OP_ID, TYPE, BITS,OP,LCK_ID,MASK,GOMP_FLAG // ------------------------------------------------------------------------ // Routines for ATOMIC integer operands, other operators // ------------------------------------------------------------------------ // TYPE_ID,OP_ID, TYPE, OP, LCK_ID, GOMP_FLAG ATOMIC_CMPXCHG( fixed1, add, kmp_int8, 8, +, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_add ATOMIC_CMPXCHG( fixed1, andb, kmp_int8, 8, &, 1i, 0, 0 ) // __kmpc_atomic_fixed1_andb ATOMIC_CMPXCHG( fixed1, div, kmp_int8, 8, /, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_div ATOMIC_CMPXCHG( fixed1u, div, kmp_uint8, 8, /, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_div ATOMIC_CMPXCHG( fixed1, mul, kmp_int8, 8, *, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_mul ATOMIC_CMPXCHG( fixed1, orb, kmp_int8, 8, |, 1i, 0, 0 ) // __kmpc_atomic_fixed1_orb ATOMIC_CMPXCHG( fixed1, shl, kmp_int8, 8, <<, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_shl ATOMIC_CMPXCHG( fixed1, shr, kmp_int8, 8, >>, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_shr ATOMIC_CMPXCHG( fixed1u, shr, kmp_uint8, 8, >>, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_shr ATOMIC_CMPXCHG( fixed1, sub, kmp_int8, 8, -, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_sub ATOMIC_CMPXCHG( fixed1, xor, kmp_int8, 8, ^, 1i, 0, 0 ) // __kmpc_atomic_fixed1_xor ATOMIC_CMPXCHG( fixed2, add, kmp_int16, 16, +, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_add ATOMIC_CMPXCHG( fixed2, andb, kmp_int16, 16, &, 2i, 1, 0 ) // __kmpc_atomic_fixed2_andb ATOMIC_CMPXCHG( fixed2, div, kmp_int16, 16, /, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_div ATOMIC_CMPXCHG( fixed2u, div, kmp_uint16, 16, /, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_div ATOMIC_CMPXCHG( fixed2, mul, kmp_int16, 16, *, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_mul ATOMIC_CMPXCHG( fixed2, orb, kmp_int16, 16, |, 2i, 1, 0 ) // __kmpc_atomic_fixed2_orb ATOMIC_CMPXCHG( fixed2, shl, kmp_int16, 16, <<, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_shl ATOMIC_CMPXCHG( fixed2, shr, kmp_int16, 16, >>, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_shr ATOMIC_CMPXCHG( fixed2u, shr, kmp_uint16, 16, >>, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_shr ATOMIC_CMPXCHG( fixed2, sub, kmp_int16, 16, -, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_sub ATOMIC_CMPXCHG( fixed2, xor, kmp_int16, 16, ^, 2i, 1, 0 ) // __kmpc_atomic_fixed2_xor ATOMIC_CMPXCHG( fixed4, andb, kmp_int32, 32, &, 4i, 3, 0 ) // __kmpc_atomic_fixed4_andb ATOMIC_CMPXCHG( fixed4, div, kmp_int32, 32, /, 4i, 3, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_div ATOMIC_CMPXCHG( fixed4u, div, kmp_uint32, 32, /, 4i, 3, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4u_div ATOMIC_CMPXCHG( fixed4, mul, kmp_int32, 32, *, 4i, 3, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_mul ATOMIC_CMPXCHG( fixed4, orb, kmp_int32, 32, |, 4i, 3, 0 ) // __kmpc_atomic_fixed4_orb ATOMIC_CMPXCHG( fixed4, shl, kmp_int32, 32, <<, 4i, 3, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_shl ATOMIC_CMPXCHG( fixed4, shr, kmp_int32, 32, >>, 4i, 3, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_shr ATOMIC_CMPXCHG( fixed4u, shr, kmp_uint32, 32, >>, 4i, 3, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4u_shr ATOMIC_CMPXCHG( fixed4, xor, kmp_int32, 32, ^, 4i, 3, 0 ) // __kmpc_atomic_fixed4_xor ATOMIC_CMPXCHG( fixed8, andb, kmp_int64, 64, &, 8i, 7, KMP_ARCH_X86 ) // 
__kmpc_atomic_fixed8_andb ATOMIC_CMPXCHG( fixed8, div, kmp_int64, 64, /, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_div ATOMIC_CMPXCHG( fixed8u, div, kmp_uint64, 64, /, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_div ATOMIC_CMPXCHG( fixed8, mul, kmp_int64, 64, *, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_mul ATOMIC_CMPXCHG( fixed8, orb, kmp_int64, 64, |, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_orb ATOMIC_CMPXCHG( fixed8, shl, kmp_int64, 64, <<, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_shl ATOMIC_CMPXCHG( fixed8, shr, kmp_int64, 64, >>, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_shr ATOMIC_CMPXCHG( fixed8u, shr, kmp_uint64, 64, >>, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_shr ATOMIC_CMPXCHG( fixed8, xor, kmp_int64, 64, ^, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_xor ATOMIC_CMPXCHG( float4, div, kmp_real32, 32, /, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_div ATOMIC_CMPXCHG( float4, mul, kmp_real32, 32, *, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_mul ATOMIC_CMPXCHG( float8, div, kmp_real64, 64, /, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_div ATOMIC_CMPXCHG( float8, mul, kmp_real64, 64, *, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_mul // TYPE_ID,OP_ID, TYPE, OP, LCK_ID, GOMP_FLAG /* ------------------------------------------------------------------------ */ /* Routines for C/C++ Reduction operators && and || */ /* ------------------------------------------------------------------------ */ // ------------------------------------------------------------------------ // Need separate macros for &&, || because there is no combined assignment // TODO: eliminate ATOMIC_CRIT_{L,EQV} macros as not used #define ATOMIC_CRIT_L(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \ ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ OP_GOMP_CRITICAL( = *lhs OP, GOMP_FLAG ) \ OP_CRITICAL( = *lhs OP, LCK_ID ) \ } #if KMP_ARCH_X86 || KMP_ARCH_X86_64 // ------------------------------------------------------------------------ // X86 or X86_64: no alignment problems =================================== #define ATOMIC_CMPX_L(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \ ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ OP_GOMP_CRITICAL( = *lhs OP, GOMP_FLAG ) \ OP_CMPXCHG(TYPE,BITS,OP) \ } #else // ------------------------------------------------------------------------ // Code for other architectures that don't handle unaligned accesses. #define ATOMIC_CMPX_L(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \ ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ OP_GOMP_CRITICAL(= *lhs OP,GOMP_FLAG) \ if ( ! 
( (kmp_uintptr_t) lhs & 0x##MASK) ) { \ OP_CMPXCHG(TYPE,BITS,OP) /* aligned address */ \ } else { \ KMP_CHECK_GTID; \ OP_CRITICAL(= *lhs OP,LCK_ID) /* unaligned - use critical */ \ } \ } #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */ ATOMIC_CMPX_L( fixed1, andl, char, 8, &&, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_andl ATOMIC_CMPX_L( fixed1, orl, char, 8, ||, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_orl ATOMIC_CMPX_L( fixed2, andl, short, 16, &&, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_andl ATOMIC_CMPX_L( fixed2, orl, short, 16, ||, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_orl ATOMIC_CMPX_L( fixed4, andl, kmp_int32, 32, &&, 4i, 3, 0 ) // __kmpc_atomic_fixed4_andl ATOMIC_CMPX_L( fixed4, orl, kmp_int32, 32, ||, 4i, 3, 0 ) // __kmpc_atomic_fixed4_orl ATOMIC_CMPX_L( fixed8, andl, kmp_int64, 64, &&, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_andl ATOMIC_CMPX_L( fixed8, orl, kmp_int64, 64, ||, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_orl /* ------------------------------------------------------------------------- */ /* Routines for Fortran operators that matched no one in C: */ /* MAX, MIN, .EQV., .NEQV. */ /* Operators .AND., .OR. are covered by __kmpc_atomic_*_{andl,orl} */ /* Intrinsics IAND, IOR, IEOR are covered by __kmpc_atomic_*_{andb,orb,xor} */ /* ------------------------------------------------------------------------- */ // ------------------------------------------------------------------------- // MIN and MAX need separate macros // OP - operator to check if we need any actions? #define MIN_MAX_CRITSECT(OP,LCK_ID) \ __kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \ \ if ( *lhs OP rhs ) { /* still need actions? */ \ *lhs = rhs; \ } \ __kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); // ------------------------------------------------------------------------- #ifdef KMP_GOMP_COMPAT #define GOMP_MIN_MAX_CRITSECT(OP,FLAG) \ if (( FLAG ) && ( __kmp_atomic_mode == 2 )) { \ KMP_CHECK_GTID; \ MIN_MAX_CRITSECT( OP, 0 ); \ return; \ } #else #define GOMP_MIN_MAX_CRITSECT(OP,FLAG) #endif /* KMP_GOMP_COMPAT */ // ------------------------------------------------------------------------- #define MIN_MAX_CMPXCHG(TYPE,BITS,OP) \ { \ TYPE KMP_ATOMIC_VOLATILE temp_val; \ TYPE old_value; \ temp_val = *lhs; \ old_value = temp_val; \ while ( old_value OP rhs && /* still need actions? */ \ ! KMP_COMPARE_AND_STORE_ACQ##BITS( (kmp_int##BITS *) lhs, \ *VOLATILE_CAST(kmp_int##BITS *) &old_value, \ *VOLATILE_CAST(kmp_int##BITS *) &rhs ) ) \ { \ KMP_CPU_PAUSE(); \ temp_val = *lhs; \ old_value = temp_val; \ } \ } // ------------------------------------------------------------------------- // 1-byte, 2-byte operands - use critical section #define MIN_MAX_CRITICAL(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \ ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ if ( *lhs OP rhs ) { /* need actions? */ \ GOMP_MIN_MAX_CRITSECT(OP,GOMP_FLAG) \ MIN_MAX_CRITSECT(OP,LCK_ID) \ } \ } #if KMP_ARCH_X86 || KMP_ARCH_X86_64 // ------------------------------------------------------------------------- // X86 or X86_64: no alignment problems ==================================== #define MIN_MAX_COMPXCHG(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \ ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ if ( *lhs OP rhs ) { \ GOMP_MIN_MAX_CRITSECT(OP,GOMP_FLAG) \ MIN_MAX_CMPXCHG(TYPE,BITS,OP) \ } \ } #else // ------------------------------------------------------------------------- // Code for other architectures that don't handle unaligned accesses. 
#define MIN_MAX_COMPXCHG(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \ ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ if ( *lhs OP rhs ) { \ GOMP_MIN_MAX_CRITSECT(OP,GOMP_FLAG) \ if ( ! ( (kmp_uintptr_t) lhs & 0x##MASK) ) { \ MIN_MAX_CMPXCHG(TYPE,BITS,OP) /* aligned address */ \ } else { \ KMP_CHECK_GTID; \ MIN_MAX_CRITSECT(OP,LCK_ID) /* unaligned address */ \ } \ } \ } #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */ MIN_MAX_COMPXCHG( fixed1, max, char, 8, <, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_max MIN_MAX_COMPXCHG( fixed1, min, char, 8, >, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_min MIN_MAX_COMPXCHG( fixed2, max, short, 16, <, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_max MIN_MAX_COMPXCHG( fixed2, min, short, 16, >, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_min MIN_MAX_COMPXCHG( fixed4, max, kmp_int32, 32, <, 4i, 3, 0 ) // __kmpc_atomic_fixed4_max MIN_MAX_COMPXCHG( fixed4, min, kmp_int32, 32, >, 4i, 3, 0 ) // __kmpc_atomic_fixed4_min MIN_MAX_COMPXCHG( fixed8, max, kmp_int64, 64, <, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_max MIN_MAX_COMPXCHG( fixed8, min, kmp_int64, 64, >, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_min MIN_MAX_COMPXCHG( float4, max, kmp_real32, 32, <, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_max MIN_MAX_COMPXCHG( float4, min, kmp_real32, 32, >, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_min MIN_MAX_COMPXCHG( float8, max, kmp_real64, 64, <, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_max MIN_MAX_COMPXCHG( float8, min, kmp_real64, 64, >, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_min #if KMP_HAVE_QUAD MIN_MAX_CRITICAL( float16, max, QUAD_LEGACY, <, 16r, 1 ) // __kmpc_atomic_float16_max MIN_MAX_CRITICAL( float16, min, QUAD_LEGACY, >, 16r, 1 ) // __kmpc_atomic_float16_min #if ( KMP_ARCH_X86 ) MIN_MAX_CRITICAL( float16, max_a16, Quad_a16_t, <, 16r, 1 ) // __kmpc_atomic_float16_max_a16 MIN_MAX_CRITICAL( float16, min_a16, Quad_a16_t, >, 16r, 1 ) // __kmpc_atomic_float16_min_a16 #endif #endif // ------------------------------------------------------------------------ // Need separate macros for .EQV. because of the need of complement (~) // OP ignored for critical sections, ^=~ used instead #define ATOMIC_CRIT_EQV(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \ ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ OP_GOMP_CRITICAL(^=~,GOMP_FLAG) /* send assignment */ \ OP_CRITICAL(^=~,LCK_ID) /* send assignment and complement */ \ } // ------------------------------------------------------------------------ #if KMP_ARCH_X86 || KMP_ARCH_X86_64 // ------------------------------------------------------------------------ // X86 or X86_64: no alignment problems =================================== #define ATOMIC_CMPX_EQV(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \ ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ OP_GOMP_CRITICAL(^=~,GOMP_FLAG) /* send assignment */ \ OP_CMPXCHG(TYPE,BITS,OP) \ } // ------------------------------------------------------------------------ #else // ------------------------------------------------------------------------ // Code for other architectures that don't handle unaligned accesses. #define ATOMIC_CMPX_EQV(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \ ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ OP_GOMP_CRITICAL(^=~,GOMP_FLAG) \ if ( ! 
( (kmp_uintptr_t) lhs & 0x##MASK) ) { \ OP_CMPXCHG(TYPE,BITS,OP) /* aligned address */ \ } else { \ KMP_CHECK_GTID; \ OP_CRITICAL(^=~,LCK_ID) /* unaligned address - use critical */ \ } \ } #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */ ATOMIC_CMPXCHG( fixed1, neqv, kmp_int8, 8, ^, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_neqv ATOMIC_CMPXCHG( fixed2, neqv, kmp_int16, 16, ^, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_neqv ATOMIC_CMPXCHG( fixed4, neqv, kmp_int32, 32, ^, 4i, 3, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_neqv ATOMIC_CMPXCHG( fixed8, neqv, kmp_int64, 64, ^, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_neqv ATOMIC_CMPX_EQV( fixed1, eqv, kmp_int8, 8, ^~, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_eqv ATOMIC_CMPX_EQV( fixed2, eqv, kmp_int16, 16, ^~, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_eqv ATOMIC_CMPX_EQV( fixed4, eqv, kmp_int32, 32, ^~, 4i, 3, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_eqv ATOMIC_CMPX_EQV( fixed8, eqv, kmp_int64, 64, ^~, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_eqv // ------------------------------------------------------------------------ // Routines for Extended types: long double, _Quad, complex flavours (use critical section) // TYPE_ID, OP_ID, TYPE - detailed above // OP - operator // LCK_ID - lock identifier, used to possibly distinguish lock variable #define ATOMIC_CRITICAL(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \ ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) /* send assignment */ \ OP_CRITICAL(OP##=,LCK_ID) /* send assignment */ \ } /* ------------------------------------------------------------------------- */ // routines for long double type ATOMIC_CRITICAL( float10, add, long double, +, 10r, 1 ) // __kmpc_atomic_float10_add ATOMIC_CRITICAL( float10, sub, long double, -, 10r, 1 ) // __kmpc_atomic_float10_sub ATOMIC_CRITICAL( float10, mul, long double, *, 10r, 1 ) // __kmpc_atomic_float10_mul ATOMIC_CRITICAL( float10, div, long double, /, 10r, 1 ) // __kmpc_atomic_float10_div #if KMP_HAVE_QUAD // routines for _Quad type ATOMIC_CRITICAL( float16, add, QUAD_LEGACY, +, 16r, 1 ) // __kmpc_atomic_float16_add ATOMIC_CRITICAL( float16, sub, QUAD_LEGACY, -, 16r, 1 ) // __kmpc_atomic_float16_sub ATOMIC_CRITICAL( float16, mul, QUAD_LEGACY, *, 16r, 1 ) // __kmpc_atomic_float16_mul ATOMIC_CRITICAL( float16, div, QUAD_LEGACY, /, 16r, 1 ) // __kmpc_atomic_float16_div #if ( KMP_ARCH_X86 ) ATOMIC_CRITICAL( float16, add_a16, Quad_a16_t, +, 16r, 1 ) // __kmpc_atomic_float16_add_a16 ATOMIC_CRITICAL( float16, sub_a16, Quad_a16_t, -, 16r, 1 ) // __kmpc_atomic_float16_sub_a16 ATOMIC_CRITICAL( float16, mul_a16, Quad_a16_t, *, 16r, 1 ) // __kmpc_atomic_float16_mul_a16 ATOMIC_CRITICAL( float16, div_a16, Quad_a16_t, /, 16r, 1 ) // __kmpc_atomic_float16_div_a16 #endif #endif // routines for complex types #if USE_CMPXCHG_FIX // workaround for C78287 (complex(kind=4) data type) ATOMIC_CMPXCHG_WORKAROUND( cmplx4, add, kmp_cmplx32, 64, +, 8c, 7, 1 ) // __kmpc_atomic_cmplx4_add ATOMIC_CMPXCHG_WORKAROUND( cmplx4, sub, kmp_cmplx32, 64, -, 8c, 7, 1 ) // __kmpc_atomic_cmplx4_sub ATOMIC_CMPXCHG_WORKAROUND( cmplx4, mul, kmp_cmplx32, 64, *, 8c, 7, 1 ) // __kmpc_atomic_cmplx4_mul ATOMIC_CMPXCHG_WORKAROUND( cmplx4, div, kmp_cmplx32, 64, /, 8c, 7, 1 ) // __kmpc_atomic_cmplx4_div // end of the workaround for C78287 #else ATOMIC_CRITICAL( cmplx4, add, kmp_cmplx32, +, 8c, 1 ) // __kmpc_atomic_cmplx4_add ATOMIC_CRITICAL( cmplx4, sub, kmp_cmplx32, -, 8c, 1 ) // __kmpc_atomic_cmplx4_sub ATOMIC_CRITICAL( cmplx4, mul, kmp_cmplx32, *, 8c, 1 ) // 
__kmpc_atomic_cmplx4_mul
ATOMIC_CRITICAL( cmplx4,  div, kmp_cmplx32, /, 8c, 1 )            // __kmpc_atomic_cmplx4_div
#endif // USE_CMPXCHG_FIX

ATOMIC_CRITICAL( cmplx8,  add, kmp_cmplx64, +, 16c, 1 )           // __kmpc_atomic_cmplx8_add
ATOMIC_CRITICAL( cmplx8,  sub, kmp_cmplx64, -, 16c, 1 )           // __kmpc_atomic_cmplx8_sub
ATOMIC_CRITICAL( cmplx8,  mul, kmp_cmplx64, *, 16c, 1 )           // __kmpc_atomic_cmplx8_mul
ATOMIC_CRITICAL( cmplx8,  div, kmp_cmplx64, /, 16c, 1 )           // __kmpc_atomic_cmplx8_div
ATOMIC_CRITICAL( cmplx10, add, kmp_cmplx80, +, 20c, 1 )           // __kmpc_atomic_cmplx10_add
ATOMIC_CRITICAL( cmplx10, sub, kmp_cmplx80, -, 20c, 1 )           // __kmpc_atomic_cmplx10_sub
ATOMIC_CRITICAL( cmplx10, mul, kmp_cmplx80, *, 20c, 1 )           // __kmpc_atomic_cmplx10_mul
ATOMIC_CRITICAL( cmplx10, div, kmp_cmplx80, /, 20c, 1 )           // __kmpc_atomic_cmplx10_div
#if KMP_HAVE_QUAD
ATOMIC_CRITICAL( cmplx16, add, CPLX128_LEG, +, 32c, 1 )           // __kmpc_atomic_cmplx16_add
ATOMIC_CRITICAL( cmplx16, sub, CPLX128_LEG, -, 32c, 1 )           // __kmpc_atomic_cmplx16_sub
ATOMIC_CRITICAL( cmplx16, mul, CPLX128_LEG, *, 32c, 1 )           // __kmpc_atomic_cmplx16_mul
ATOMIC_CRITICAL( cmplx16, div, CPLX128_LEG, /, 32c, 1 )           // __kmpc_atomic_cmplx16_div
#if ( KMP_ARCH_X86 )
    ATOMIC_CRITICAL( cmplx16, add_a16, kmp_cmplx128_a16_t, +, 32c, 1 )   // __kmpc_atomic_cmplx16_add_a16
    ATOMIC_CRITICAL( cmplx16, sub_a16, kmp_cmplx128_a16_t, -, 32c, 1 )   // __kmpc_atomic_cmplx16_sub_a16
    ATOMIC_CRITICAL( cmplx16, mul_a16, kmp_cmplx128_a16_t, *, 32c, 1 )   // __kmpc_atomic_cmplx16_mul_a16
    ATOMIC_CRITICAL( cmplx16, div_a16, kmp_cmplx128_a16_t, /, 32c, 1 )   // __kmpc_atomic_cmplx16_div_a16
#endif
#endif

#if OMP_40_ENABLED

// OpenMP 4.0: x = expr binop x for non-commutative operations.
// Supported only on IA-32 architecture and Intel(R) 64
#if KMP_ARCH_X86 || KMP_ARCH_X86_64

// ------------------------------------------------------------------------
// Operation on *lhs, rhs bound by critical section
//     OP     - operator (it's supposed to contain an assignment)
//     LCK_ID - lock identifier
// Note: don't check gtid as it should always be valid
// 1, 2-byte - expect valid parameter, other - check before this macro
#define OP_CRITICAL_REV(OP,LCK_ID)                                        \
    __kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid );             \
                                                                          \
    (*lhs) = (rhs) OP (*lhs);                                             \
                                                                          \
    __kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid );

#ifdef KMP_GOMP_COMPAT
#define OP_GOMP_CRITICAL_REV(OP,FLAG)                                     \
    if ( (FLAG) && (__kmp_atomic_mode == 2) ) {                           \
        KMP_CHECK_GTID;                                                   \
        OP_CRITICAL_REV( OP, 0 );                                         \
        return;                                                           \
    }
#else
#define OP_GOMP_CRITICAL_REV(OP,FLAG)
#endif /* KMP_GOMP_COMPAT */

// Beginning of a definition (provides name, parameters, debug trace)
//     TYPE_ID - operands type and size (fixed*, fixed*u for signed, unsigned fixed)
//     OP_ID   - operation identifier (add, sub, mul, ...)
// TYPE - operands' type #define ATOMIC_BEGIN_REV(TYPE_ID,OP_ID,TYPE, RET_TYPE) \ RET_TYPE __kmpc_atomic_##TYPE_ID##_##OP_ID##_rev( ident_t *id_ref, int gtid, TYPE * lhs, TYPE rhs ) \ { \ KMP_DEBUG_ASSERT( __kmp_init_serial ); \ KA_TRACE(100,("__kmpc_atomic_" #TYPE_ID "_" #OP_ID "_rev: T#%d\n", gtid )); // ------------------------------------------------------------------------ // Operation on *lhs, rhs using "compare_and_store" routine // TYPE - operands' type // BITS - size in bits, used to distinguish low level calls // OP - operator // Note: temp_val introduced in order to force the compiler to read // *lhs only once (w/o it the compiler reads *lhs twice) #define OP_CMPXCHG_REV(TYPE,BITS,OP) \ { \ TYPE KMP_ATOMIC_VOLATILE temp_val; \ TYPE old_value, new_value; \ temp_val = *lhs; \ old_value = temp_val; \ new_value = rhs OP old_value; \ while ( ! KMP_COMPARE_AND_STORE_ACQ##BITS( (kmp_int##BITS *) lhs, \ *VOLATILE_CAST(kmp_int##BITS *) &old_value, \ *VOLATILE_CAST(kmp_int##BITS *) &new_value ) ) \ { \ KMP_DO_PAUSE; \ \ temp_val = *lhs; \ old_value = temp_val; \ new_value = rhs OP old_value; \ } \ } // ------------------------------------------------------------------------- #define ATOMIC_CMPXCHG_REV(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,GOMP_FLAG) \ ATOMIC_BEGIN_REV(TYPE_ID,OP_ID,TYPE,void) \ OP_GOMP_CRITICAL_REV(OP,GOMP_FLAG) \ OP_CMPXCHG_REV(TYPE,BITS,OP) \ } // ------------------------------------------------------------------------ // Entries definition for integer operands // TYPE_ID - operands type and size (fixed4, float4) // OP_ID - operation identifier (add, sub, mul, ...) // TYPE - operand type // BITS - size in bits, used to distinguish low level calls // OP - operator (used in critical section) // LCK_ID - lock identifier, used to possibly distinguish lock variable // TYPE_ID,OP_ID, TYPE, BITS,OP,LCK_ID,GOMP_FLAG // ------------------------------------------------------------------------ // Routines for ATOMIC integer operands, other operators // ------------------------------------------------------------------------ // TYPE_ID,OP_ID, TYPE, BITS, OP, LCK_ID, GOMP_FLAG ATOMIC_CMPXCHG_REV( fixed1, div, kmp_int8, 8, /, 1i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_div_rev ATOMIC_CMPXCHG_REV( fixed1u, div, kmp_uint8, 8, /, 1i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_div_rev ATOMIC_CMPXCHG_REV( fixed1, shl, kmp_int8, 8, <<, 1i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_shl_rev ATOMIC_CMPXCHG_REV( fixed1, shr, kmp_int8, 8, >>, 1i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_shr_rev ATOMIC_CMPXCHG_REV( fixed1u, shr, kmp_uint8, 8, >>, 1i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_shr_rev ATOMIC_CMPXCHG_REV( fixed1, sub, kmp_int8, 8, -, 1i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_sub_rev ATOMIC_CMPXCHG_REV( fixed2, div, kmp_int16, 16, /, 2i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_div_rev ATOMIC_CMPXCHG_REV( fixed2u, div, kmp_uint16, 16, /, 2i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_div_rev ATOMIC_CMPXCHG_REV( fixed2, shl, kmp_int16, 16, <<, 2i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_shl_rev ATOMIC_CMPXCHG_REV( fixed2, shr, kmp_int16, 16, >>, 2i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_shr_rev ATOMIC_CMPXCHG_REV( fixed2u, shr, kmp_uint16, 16, >>, 2i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_shr_rev ATOMIC_CMPXCHG_REV( fixed2, sub, kmp_int16, 16, -, 2i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_sub_rev ATOMIC_CMPXCHG_REV( fixed4, div, kmp_int32, 32, /, 4i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_div_rev ATOMIC_CMPXCHG_REV( fixed4u, div, kmp_uint32, 32, /, 4i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4u_div_rev 
ATOMIC_CMPXCHG_REV( fixed4, shl, kmp_int32, 32, <<, 4i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_shl_rev ATOMIC_CMPXCHG_REV( fixed4, shr, kmp_int32, 32, >>, 4i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_shr_rev ATOMIC_CMPXCHG_REV( fixed4u, shr, kmp_uint32, 32, >>, 4i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4u_shr_rev ATOMIC_CMPXCHG_REV( fixed4, sub, kmp_int32, 32, -, 4i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_sub_rev ATOMIC_CMPXCHG_REV( fixed8, div, kmp_int64, 64, /, 8i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_div_rev ATOMIC_CMPXCHG_REV( fixed8u, div, kmp_uint64, 64, /, 8i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_div_rev ATOMIC_CMPXCHG_REV( fixed8, shl, kmp_int64, 64, <<, 8i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_shl_rev ATOMIC_CMPXCHG_REV( fixed8, shr, kmp_int64, 64, >>, 8i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_shr_rev ATOMIC_CMPXCHG_REV( fixed8u, shr, kmp_uint64, 64, >>, 8i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_shr_rev ATOMIC_CMPXCHG_REV( fixed8, sub, kmp_int64, 64, -, 8i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_sub_rev ATOMIC_CMPXCHG_REV( float4, div, kmp_real32, 32, /, 4r, KMP_ARCH_X86 ) // __kmpc_atomic_float4_div_rev ATOMIC_CMPXCHG_REV( float4, sub, kmp_real32, 32, -, 4r, KMP_ARCH_X86 ) // __kmpc_atomic_float4_sub_rev ATOMIC_CMPXCHG_REV( float8, div, kmp_real64, 64, /, 8r, KMP_ARCH_X86 ) // __kmpc_atomic_float8_div_rev ATOMIC_CMPXCHG_REV( float8, sub, kmp_real64, 64, -, 8r, KMP_ARCH_X86 ) // __kmpc_atomic_float8_sub_rev // TYPE_ID,OP_ID, TYPE, BITS,OP,LCK_ID, GOMP_FLAG // ------------------------------------------------------------------------ // Routines for Extended types: long double, _Quad, complex flavours (use critical section) // TYPE_ID, OP_ID, TYPE - detailed above // OP - operator // LCK_ID - lock identifier, used to possibly distinguish lock variable #define ATOMIC_CRITICAL_REV(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \ ATOMIC_BEGIN_REV(TYPE_ID,OP_ID,TYPE,void) \ OP_GOMP_CRITICAL_REV(OP,GOMP_FLAG) \ OP_CRITICAL_REV(OP,LCK_ID) \ } /* ------------------------------------------------------------------------- */ // routines for long double type ATOMIC_CRITICAL_REV( float10, sub, long double, -, 10r, 1 ) // __kmpc_atomic_float10_sub_rev ATOMIC_CRITICAL_REV( float10, div, long double, /, 10r, 1 ) // __kmpc_atomic_float10_div_rev #if KMP_HAVE_QUAD // routines for _Quad type ATOMIC_CRITICAL_REV( float16, sub, QUAD_LEGACY, -, 16r, 1 ) // __kmpc_atomic_float16_sub_rev ATOMIC_CRITICAL_REV( float16, div, QUAD_LEGACY, /, 16r, 1 ) // __kmpc_atomic_float16_div_rev #if ( KMP_ARCH_X86 ) ATOMIC_CRITICAL_REV( float16, sub_a16, Quad_a16_t, -, 16r, 1 ) // __kmpc_atomic_float16_sub_a16_rev ATOMIC_CRITICAL_REV( float16, div_a16, Quad_a16_t, /, 16r, 1 ) // __kmpc_atomic_float16_div_a16_rev #endif #endif // routines for complex types ATOMIC_CRITICAL_REV( cmplx4, sub, kmp_cmplx32, -, 8c, 1 ) // __kmpc_atomic_cmplx4_sub_rev ATOMIC_CRITICAL_REV( cmplx4, div, kmp_cmplx32, /, 8c, 1 ) // __kmpc_atomic_cmplx4_div_rev ATOMIC_CRITICAL_REV( cmplx8, sub, kmp_cmplx64, -, 16c, 1 ) // __kmpc_atomic_cmplx8_sub_rev ATOMIC_CRITICAL_REV( cmplx8, div, kmp_cmplx64, /, 16c, 1 ) // __kmpc_atomic_cmplx8_div_rev ATOMIC_CRITICAL_REV( cmplx10, sub, kmp_cmplx80, -, 20c, 1 ) // __kmpc_atomic_cmplx10_sub_rev ATOMIC_CRITICAL_REV( cmplx10, div, kmp_cmplx80, /, 20c, 1 ) // __kmpc_atomic_cmplx10_div_rev #if KMP_HAVE_QUAD ATOMIC_CRITICAL_REV( cmplx16, sub, CPLX128_LEG, -, 32c, 1 ) // __kmpc_atomic_cmplx16_sub_rev ATOMIC_CRITICAL_REV( cmplx16, div, CPLX128_LEG, /, 32c, 1 ) // __kmpc_atomic_cmplx16_div_rev #if ( KMP_ARCH_X86 ) 
ATOMIC_CRITICAL_REV( cmplx16, sub_a16, kmp_cmplx128_a16_t, -, 32c, 1 ) // __kmpc_atomic_cmplx16_sub_a16_rev ATOMIC_CRITICAL_REV( cmplx16, div_a16, kmp_cmplx128_a16_t, /, 32c, 1 ) // __kmpc_atomic_cmplx16_div_a16_rev #endif #endif #endif //KMP_ARCH_X86 || KMP_ARCH_X86_64 // End of OpenMP 4.0: x = expr binop x for non-commutative operations. #endif //OMP_40_ENABLED /* ------------------------------------------------------------------------ */ /* Routines for mixed types of LHS and RHS, when RHS is "larger" */ /* Note: in order to reduce the total number of types combinations */ /* it is supposed that compiler converts RHS to longest floating type,*/ /* that is _Quad, before call to any of these routines */ /* Conversion to _Quad will be done by the compiler during calculation, */ /* conversion back to TYPE - before the assignment, like: */ /* *lhs = (TYPE)( (_Quad)(*lhs) OP rhs ) */ /* Performance penalty expected because of SW emulation use */ /* ------------------------------------------------------------------------ */ #define ATOMIC_BEGIN_MIX(TYPE_ID,TYPE,OP_ID,RTYPE_ID,RTYPE) \ void __kmpc_atomic_##TYPE_ID##_##OP_ID##_##RTYPE_ID( ident_t *id_ref, int gtid, TYPE * lhs, RTYPE rhs ) \ { \ KMP_DEBUG_ASSERT( __kmp_init_serial ); \ KA_TRACE(100,("__kmpc_atomic_" #TYPE_ID "_" #OP_ID "_" #RTYPE_ID ": T#%d\n", gtid )); // ------------------------------------------------------------------------- #define ATOMIC_CRITICAL_FP(TYPE_ID,TYPE,OP_ID,OP,RTYPE_ID,RTYPE,LCK_ID,GOMP_FLAG) \ ATOMIC_BEGIN_MIX(TYPE_ID,TYPE,OP_ID,RTYPE_ID,RTYPE) \ OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) /* send assignment */ \ OP_CRITICAL(OP##=,LCK_ID) /* send assignment */ \ } // ------------------------------------------------------------------------- #if KMP_ARCH_X86 || KMP_ARCH_X86_64 // ------------------------------------------------------------------------- // X86 or X86_64: no alignment problems ==================================== #define ATOMIC_CMPXCHG_MIX(TYPE_ID,TYPE,OP_ID,BITS,OP,RTYPE_ID,RTYPE,LCK_ID,MASK,GOMP_FLAG) \ ATOMIC_BEGIN_MIX(TYPE_ID,TYPE,OP_ID,RTYPE_ID,RTYPE) \ OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \ OP_CMPXCHG(TYPE,BITS,OP) \ } // ------------------------------------------------------------------------- #else // ------------------------------------------------------------------------ // Code for other architectures that don't handle unaligned accesses. #define ATOMIC_CMPXCHG_MIX(TYPE_ID,TYPE,OP_ID,BITS,OP,RTYPE_ID,RTYPE,LCK_ID,MASK,GOMP_FLAG) \ ATOMIC_BEGIN_MIX(TYPE_ID,TYPE,OP_ID,RTYPE_ID,RTYPE) \ OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \ if ( ! 
( (kmp_uintptr_t) lhs & 0x##MASK) ) { \ OP_CMPXCHG(TYPE,BITS,OP) /* aligned address */ \ } else { \ KMP_CHECK_GTID; \ OP_CRITICAL(OP##=,LCK_ID) /* unaligned address - use critical */ \ } \ } #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */ // RHS=float8 ATOMIC_CMPXCHG_MIX( fixed1, char, mul, 8, *, float8, kmp_real64, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_mul_float8 ATOMIC_CMPXCHG_MIX( fixed1, char, div, 8, /, float8, kmp_real64, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_div_float8 ATOMIC_CMPXCHG_MIX( fixed2, short, mul, 16, *, float8, kmp_real64, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_mul_float8 ATOMIC_CMPXCHG_MIX( fixed2, short, div, 16, /, float8, kmp_real64, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_div_float8 ATOMIC_CMPXCHG_MIX( fixed4, kmp_int32, mul, 32, *, float8, kmp_real64, 4i, 3, 0 ) // __kmpc_atomic_fixed4_mul_float8 ATOMIC_CMPXCHG_MIX( fixed4, kmp_int32, div, 32, /, float8, kmp_real64, 4i, 3, 0 ) // __kmpc_atomic_fixed4_div_float8 ATOMIC_CMPXCHG_MIX( fixed8, kmp_int64, mul, 64, *, float8, kmp_real64, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_mul_float8 ATOMIC_CMPXCHG_MIX( fixed8, kmp_int64, div, 64, /, float8, kmp_real64, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_div_float8 ATOMIC_CMPXCHG_MIX( float4, kmp_real32, add, 32, +, float8, kmp_real64, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_add_float8 ATOMIC_CMPXCHG_MIX( float4, kmp_real32, sub, 32, -, float8, kmp_real64, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_sub_float8 ATOMIC_CMPXCHG_MIX( float4, kmp_real32, mul, 32, *, float8, kmp_real64, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_mul_float8 ATOMIC_CMPXCHG_MIX( float4, kmp_real32, div, 32, /, float8, kmp_real64, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_div_float8 // RHS=float16 (deprecated, to be removed when we are sure the compiler does not use them) #if KMP_HAVE_QUAD ATOMIC_CMPXCHG_MIX( fixed1, char, add, 8, +, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_add_fp ATOMIC_CMPXCHG_MIX( fixed1, char, sub, 8, -, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_sub_fp ATOMIC_CMPXCHG_MIX( fixed1, char, mul, 8, *, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_mul_fp ATOMIC_CMPXCHG_MIX( fixed1, char, div, 8, /, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_div_fp ATOMIC_CMPXCHG_MIX( fixed1u, uchar, div, 8, /, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_div_fp ATOMIC_CMPXCHG_MIX( fixed2, short, add, 16, +, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_add_fp ATOMIC_CMPXCHG_MIX( fixed2, short, sub, 16, -, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_sub_fp ATOMIC_CMPXCHG_MIX( fixed2, short, mul, 16, *, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_mul_fp ATOMIC_CMPXCHG_MIX( fixed2, short, div, 16, /, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_div_fp ATOMIC_CMPXCHG_MIX( fixed2u, ushort, div, 16, /, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_div_fp ATOMIC_CMPXCHG_MIX( fixed4, kmp_int32, add, 32, +, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4_add_fp ATOMIC_CMPXCHG_MIX( fixed4, kmp_int32, sub, 32, -, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4_sub_fp ATOMIC_CMPXCHG_MIX( fixed4, kmp_int32, mul, 32, *, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4_mul_fp ATOMIC_CMPXCHG_MIX( fixed4, kmp_int32, div, 32, /, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4_div_fp ATOMIC_CMPXCHG_MIX( fixed4u, kmp_uint32, div, 32, /, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4u_div_fp ATOMIC_CMPXCHG_MIX( fixed8, kmp_int64, add, 64, +, fp, _Quad, 8i, 7, 
KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_add_fp ATOMIC_CMPXCHG_MIX( fixed8, kmp_int64, sub, 64, -, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_sub_fp ATOMIC_CMPXCHG_MIX( fixed8, kmp_int64, mul, 64, *, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_mul_fp ATOMIC_CMPXCHG_MIX( fixed8, kmp_int64, div, 64, /, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_div_fp ATOMIC_CMPXCHG_MIX( fixed8u, kmp_uint64, div, 64, /, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_div_fp ATOMIC_CMPXCHG_MIX( float4, kmp_real32, add, 32, +, fp, _Quad, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_add_fp ATOMIC_CMPXCHG_MIX( float4, kmp_real32, sub, 32, -, fp, _Quad, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_sub_fp ATOMIC_CMPXCHG_MIX( float4, kmp_real32, mul, 32, *, fp, _Quad, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_mul_fp ATOMIC_CMPXCHG_MIX( float4, kmp_real32, div, 32, /, fp, _Quad, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_div_fp ATOMIC_CMPXCHG_MIX( float8, kmp_real64, add, 64, +, fp, _Quad, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_add_fp ATOMIC_CMPXCHG_MIX( float8, kmp_real64, sub, 64, -, fp, _Quad, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_sub_fp ATOMIC_CMPXCHG_MIX( float8, kmp_real64, mul, 64, *, fp, _Quad, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_mul_fp ATOMIC_CMPXCHG_MIX( float8, kmp_real64, div, 64, /, fp, _Quad, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_div_fp ATOMIC_CRITICAL_FP( float10, long double, add, +, fp, _Quad, 10r, 1 ) // __kmpc_atomic_float10_add_fp ATOMIC_CRITICAL_FP( float10, long double, sub, -, fp, _Quad, 10r, 1 ) // __kmpc_atomic_float10_sub_fp ATOMIC_CRITICAL_FP( float10, long double, mul, *, fp, _Quad, 10r, 1 ) // __kmpc_atomic_float10_mul_fp ATOMIC_CRITICAL_FP( float10, long double, div, /, fp, _Quad, 10r, 1 ) // __kmpc_atomic_float10_div_fp #endif #if KMP_ARCH_X86 || KMP_ARCH_X86_64 // ------------------------------------------------------------------------ // X86 or X86_64: no alignment problems ==================================== #if USE_CMPXCHG_FIX // workaround for C78287 (complex(kind=4) data type) #define ATOMIC_CMPXCHG_CMPLX(TYPE_ID,TYPE,OP_ID,BITS,OP,RTYPE_ID,RTYPE,LCK_ID,MASK,GOMP_FLAG) \ ATOMIC_BEGIN_MIX(TYPE_ID,TYPE,OP_ID,RTYPE_ID,RTYPE) \ OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \ OP_CMPXCHG_WORKAROUND(TYPE,BITS,OP) \ } // end of the second part of the workaround for C78287 #else #define ATOMIC_CMPXCHG_CMPLX(TYPE_ID,TYPE,OP_ID,BITS,OP,RTYPE_ID,RTYPE,LCK_ID,MASK,GOMP_FLAG) \ ATOMIC_BEGIN_MIX(TYPE_ID,TYPE,OP_ID,RTYPE_ID,RTYPE) \ OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \ OP_CMPXCHG(TYPE,BITS,OP) \ } #endif // USE_CMPXCHG_FIX #else // ------------------------------------------------------------------------ // Code for other architectures that don't handle unaligned accesses. #define ATOMIC_CMPXCHG_CMPLX(TYPE_ID,TYPE,OP_ID,BITS,OP,RTYPE_ID,RTYPE,LCK_ID,MASK,GOMP_FLAG) \ ATOMIC_BEGIN_MIX(TYPE_ID,TYPE,OP_ID,RTYPE_ID,RTYPE) \ OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \ if ( ! 
( (kmp_uintptr_t) lhs & 0x##MASK) ) { \
        OP_CMPXCHG(TYPE,BITS,OP)     /* aligned address */ \
    } else { \
        KMP_CHECK_GTID; \
        OP_CRITICAL(OP##=,LCK_ID)  /* unaligned address - use critical */ \
    } \
}
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */

ATOMIC_CMPXCHG_CMPLX( cmplx4, kmp_cmplx32, add, 64, +, cmplx8, kmp_cmplx64, 8c, 7, KMP_ARCH_X86 ) // __kmpc_atomic_cmplx4_add_cmplx8
ATOMIC_CMPXCHG_CMPLX( cmplx4, kmp_cmplx32, sub, 64, -, cmplx8, kmp_cmplx64, 8c, 7, KMP_ARCH_X86 ) // __kmpc_atomic_cmplx4_sub_cmplx8
ATOMIC_CMPXCHG_CMPLX( cmplx4, kmp_cmplx32, mul, 64, *, cmplx8, kmp_cmplx64, 8c, 7, KMP_ARCH_X86 ) // __kmpc_atomic_cmplx4_mul_cmplx8
ATOMIC_CMPXCHG_CMPLX( cmplx4, kmp_cmplx32, div, 64, /, cmplx8, kmp_cmplx64, 8c, 7, KMP_ARCH_X86 ) // __kmpc_atomic_cmplx4_div_cmplx8

// READ, WRITE, CAPTURE are supported only on IA-32 architecture and Intel(R) 64
#if KMP_ARCH_X86 || KMP_ARCH_X86_64

//////////////////////////////////////////////////////////////////////////////////////////////////////

// ------------------------------------------------------------------------
// Atomic READ routines
// ------------------------------------------------------------------------

// ------------------------------------------------------------------------
// Beginning of a definition (provides name, parameters, debug trace)
// TYPE_ID - operands type and size (fixed*, fixed*u for signed, unsigned fixed)
// OP_ID - operation identifier (add, sub, mul, ...)
// TYPE - operands' type
#define ATOMIC_BEGIN_READ(TYPE_ID,OP_ID,TYPE, RET_TYPE) \
RET_TYPE __kmpc_atomic_##TYPE_ID##_##OP_ID( ident_t *id_ref, int gtid, TYPE * loc ) \
{ \
    KMP_DEBUG_ASSERT( __kmp_init_serial ); \
    KA_TRACE(100,("__kmpc_atomic_" #TYPE_ID "_" #OP_ID ": T#%d\n", gtid ));

// ------------------------------------------------------------------------
// Operation on *lhs, rhs using "compare_and_store_ret" routine
// TYPE - operands' type
// BITS - size in bits, used to distinguish low level calls
// OP - operator
// Note: temp_val introduced in order to force the compiler to read
//       *lhs only once (w/o it the compiler reads *lhs twice)
// TODO: check if it is still necessary
// Return old value regardless of the result of "compare & swap" operation
#define OP_CMPXCHG_READ(TYPE,BITS,OP) \
    { \
        TYPE KMP_ATOMIC_VOLATILE temp_val; \
        union f_i_union { \
            TYPE f_val; \
            kmp_int##BITS i_val; \
        }; \
        union f_i_union old_value; \
        temp_val = *loc; \
        old_value.f_val = temp_val; \
        old_value.i_val = KMP_COMPARE_AND_STORE_RET##BITS( (kmp_int##BITS *) loc, \
                      *VOLATILE_CAST(kmp_int##BITS *) &old_value.i_val, \
                      *VOLATILE_CAST(kmp_int##BITS *) &old_value.i_val ); \
        new_value = old_value.f_val; \
        return new_value; \
    }

// -------------------------------------------------------------------------
// Operation on *lhs, rhs bound by critical section
// OP     - operator (it's supposed to contain an assignment)
// LCK_ID - lock identifier
// Note: don't check gtid as it should always be valid
// 1, 2-byte - expect valid parameter, other - check before this macro
#define OP_CRITICAL_READ(OP,LCK_ID) \
    __kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \
 \
    new_value = (*loc); \
 \
    __kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid );

// -------------------------------------------------------------------------
#ifdef KMP_GOMP_COMPAT
#define OP_GOMP_CRITICAL_READ(OP,FLAG) \
    if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \
        KMP_CHECK_GTID; \
        OP_CRITICAL_READ( OP, 0 ); \
        return new_value; \
    }
#else
#define OP_GOMP_CRITICAL_READ(OP,FLAG)
#endif /* KMP_GOMP_COMPAT */
// 
------------------------------------------------------------------------- #define ATOMIC_FIXED_READ(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG) \ ATOMIC_BEGIN_READ(TYPE_ID,OP_ID,TYPE,TYPE) \ TYPE new_value; \ OP_GOMP_CRITICAL_READ(OP##=,GOMP_FLAG) \ new_value = KMP_TEST_THEN_ADD##BITS( loc, OP 0 ); \ return new_value; \ } // ------------------------------------------------------------------------- #define ATOMIC_CMPXCHG_READ(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG) \ ATOMIC_BEGIN_READ(TYPE_ID,OP_ID,TYPE,TYPE) \ TYPE new_value; \ OP_GOMP_CRITICAL_READ(OP##=,GOMP_FLAG) \ OP_CMPXCHG_READ(TYPE,BITS,OP) \ } // ------------------------------------------------------------------------ // Routines for Extended types: long double, _Quad, complex flavours (use critical section) // TYPE_ID, OP_ID, TYPE - detailed above // OP - operator // LCK_ID - lock identifier, used to possibly distinguish lock variable #define ATOMIC_CRITICAL_READ(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \ ATOMIC_BEGIN_READ(TYPE_ID,OP_ID,TYPE,TYPE) \ TYPE new_value; \ OP_GOMP_CRITICAL_READ(OP##=,GOMP_FLAG) /* send assignment */ \ OP_CRITICAL_READ(OP,LCK_ID) /* send assignment */ \ return new_value; \ } // ------------------------------------------------------------------------ // Fix for cmplx4 read (CQ220361) on Windows* OS. Regular routine with return value doesn't work. // Let's return the read value through the additional parameter. #if ( KMP_OS_WINDOWS ) #define OP_CRITICAL_READ_WRK(OP,LCK_ID) \ __kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \ \ (*out) = (*loc); \ \ __kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); // ------------------------------------------------------------------------ #ifdef KMP_GOMP_COMPAT #define OP_GOMP_CRITICAL_READ_WRK(OP,FLAG) \ if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \ KMP_CHECK_GTID; \ OP_CRITICAL_READ_WRK( OP, 0 ); \ } #else #define OP_GOMP_CRITICAL_READ_WRK(OP,FLAG) #endif /* KMP_GOMP_COMPAT */ // ------------------------------------------------------------------------ #define ATOMIC_BEGIN_READ_WRK(TYPE_ID,OP_ID,TYPE) \ void __kmpc_atomic_##TYPE_ID##_##OP_ID( TYPE * out, ident_t *id_ref, int gtid, TYPE * loc ) \ { \ KMP_DEBUG_ASSERT( __kmp_init_serial ); \ KA_TRACE(100,("__kmpc_atomic_" #TYPE_ID "_" #OP_ID ": T#%d\n", gtid )); // ------------------------------------------------------------------------ #define ATOMIC_CRITICAL_READ_WRK(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \ ATOMIC_BEGIN_READ_WRK(TYPE_ID,OP_ID,TYPE) \ OP_GOMP_CRITICAL_READ_WRK(OP##=,GOMP_FLAG) /* send assignment */ \ OP_CRITICAL_READ_WRK(OP,LCK_ID) /* send assignment */ \ } #endif // KMP_OS_WINDOWS // ------------------------------------------------------------------------ // TYPE_ID,OP_ID, TYPE, OP, GOMP_FLAG ATOMIC_FIXED_READ( fixed4, rd, kmp_int32, 32, +, 0 ) // __kmpc_atomic_fixed4_rd ATOMIC_FIXED_READ( fixed8, rd, kmp_int64, 64, +, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_rd ATOMIC_CMPXCHG_READ( float4, rd, kmp_real32, 32, +, KMP_ARCH_X86 ) // __kmpc_atomic_float4_rd ATOMIC_CMPXCHG_READ( float8, rd, kmp_real64, 64, +, KMP_ARCH_X86 ) // __kmpc_atomic_float8_rd // !!! 
TODO: Remove lock operations for "char" since it can't be non-atomic ATOMIC_CMPXCHG_READ( fixed1, rd, kmp_int8, 8, +, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_rd ATOMIC_CMPXCHG_READ( fixed2, rd, kmp_int16, 16, +, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_rd ATOMIC_CRITICAL_READ( float10, rd, long double, +, 10r, 1 ) // __kmpc_atomic_float10_rd #if KMP_HAVE_QUAD ATOMIC_CRITICAL_READ( float16, rd, QUAD_LEGACY, +, 16r, 1 ) // __kmpc_atomic_float16_rd #endif // KMP_HAVE_QUAD // Fix for CQ220361 on Windows* OS #if ( KMP_OS_WINDOWS ) ATOMIC_CRITICAL_READ_WRK( cmplx4, rd, kmp_cmplx32, +, 8c, 1 ) // __kmpc_atomic_cmplx4_rd #else ATOMIC_CRITICAL_READ( cmplx4, rd, kmp_cmplx32, +, 8c, 1 ) // __kmpc_atomic_cmplx4_rd #endif ATOMIC_CRITICAL_READ( cmplx8, rd, kmp_cmplx64, +, 16c, 1 ) // __kmpc_atomic_cmplx8_rd ATOMIC_CRITICAL_READ( cmplx10, rd, kmp_cmplx80, +, 20c, 1 ) // __kmpc_atomic_cmplx10_rd #if KMP_HAVE_QUAD ATOMIC_CRITICAL_READ( cmplx16, rd, CPLX128_LEG, +, 32c, 1 ) // __kmpc_atomic_cmplx16_rd #if ( KMP_ARCH_X86 ) ATOMIC_CRITICAL_READ( float16, a16_rd, Quad_a16_t, +, 16r, 1 ) // __kmpc_atomic_float16_a16_rd ATOMIC_CRITICAL_READ( cmplx16, a16_rd, kmp_cmplx128_a16_t, +, 32c, 1 ) // __kmpc_atomic_cmplx16_a16_rd #endif #endif // ------------------------------------------------------------------------ // Atomic WRITE routines // ------------------------------------------------------------------------ #define ATOMIC_XCHG_WR(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG) \ ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ OP_GOMP_CRITICAL(OP,GOMP_FLAG) \ KMP_XCHG_FIXED##BITS( lhs, rhs ); \ } // ------------------------------------------------------------------------ #define ATOMIC_XCHG_FLOAT_WR(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG) \ ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ OP_GOMP_CRITICAL(OP,GOMP_FLAG) \ KMP_XCHG_REAL##BITS( lhs, rhs ); \ } // ------------------------------------------------------------------------ // Operation on *lhs, rhs using "compare_and_store" routine // TYPE - operands' type // BITS - size in bits, used to distinguish low level calls // OP - operator // Note: temp_val introduced in order to force the compiler to read // *lhs only once (w/o it the compiler reads *lhs twice) #define OP_CMPXCHG_WR(TYPE,BITS,OP) \ { \ TYPE KMP_ATOMIC_VOLATILE temp_val; \ TYPE old_value, new_value; \ temp_val = *lhs; \ old_value = temp_val; \ new_value = rhs; \ while ( ! 
KMP_COMPARE_AND_STORE_ACQ##BITS( (kmp_int##BITS *) lhs, \
            *VOLATILE_CAST(kmp_int##BITS *) &old_value, \
            *VOLATILE_CAST(kmp_int##BITS *) &new_value ) ) \
    { \
        KMP_CPU_PAUSE(); \
 \
        temp_val = *lhs; \
        old_value = temp_val; \
        new_value = rhs; \
    } \
}

// -------------------------------------------------------------------------
#define ATOMIC_CMPXCHG_WR(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG) \
ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \
    OP_GOMP_CRITICAL(OP,GOMP_FLAG) \
    OP_CMPXCHG_WR(TYPE,BITS,OP) \
}

// ------------------------------------------------------------------------
// Routines for Extended types: long double, _Quad, complex flavours (use critical section)
// TYPE_ID, OP_ID, TYPE - detailed above
// OP - operator
// LCK_ID - lock identifier, used to possibly distinguish lock variable
#define ATOMIC_CRITICAL_WR(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \
ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \
    OP_GOMP_CRITICAL(OP,GOMP_FLAG)  /* send assignment */ \
    OP_CRITICAL(OP,LCK_ID)  /* send assignment */ \
}
// -------------------------------------------------------------------------

ATOMIC_XCHG_WR( fixed1, wr, kmp_int8, 8, =, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_wr
ATOMIC_XCHG_WR( fixed2, wr, kmp_int16, 16, =, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_wr
ATOMIC_XCHG_WR( fixed4, wr, kmp_int32, 32, =, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_wr
#if ( KMP_ARCH_X86 )
ATOMIC_CMPXCHG_WR( fixed8, wr, kmp_int64, 64, =, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_wr
#else
ATOMIC_XCHG_WR( fixed8, wr, kmp_int64, 64, =, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_wr
#endif

ATOMIC_XCHG_FLOAT_WR( float4, wr, kmp_real32, 32, =, KMP_ARCH_X86 ) // __kmpc_atomic_float4_wr
#if ( KMP_ARCH_X86 )
ATOMIC_CMPXCHG_WR( float8, wr, kmp_real64, 64, =, KMP_ARCH_X86 ) // __kmpc_atomic_float8_wr
#else
ATOMIC_XCHG_FLOAT_WR( float8, wr, kmp_real64, 64, =, KMP_ARCH_X86 ) // __kmpc_atomic_float8_wr
#endif

ATOMIC_CRITICAL_WR( float10, wr, long double, =, 10r, 1 ) // __kmpc_atomic_float10_wr
#if KMP_HAVE_QUAD
ATOMIC_CRITICAL_WR( float16, wr, QUAD_LEGACY, =, 16r, 1 ) // __kmpc_atomic_float16_wr
#endif
ATOMIC_CRITICAL_WR( cmplx4, wr, kmp_cmplx32, =, 8c, 1 ) // __kmpc_atomic_cmplx4_wr
ATOMIC_CRITICAL_WR( cmplx8, wr, kmp_cmplx64, =, 16c, 1 ) // __kmpc_atomic_cmplx8_wr
ATOMIC_CRITICAL_WR( cmplx10, wr, kmp_cmplx80, =, 20c, 1 ) // __kmpc_atomic_cmplx10_wr
#if KMP_HAVE_QUAD
ATOMIC_CRITICAL_WR( cmplx16, wr, CPLX128_LEG, =, 32c, 1 ) // __kmpc_atomic_cmplx16_wr
#if ( KMP_ARCH_X86 )
ATOMIC_CRITICAL_WR( float16, a16_wr, Quad_a16_t, =, 16r, 1 ) // __kmpc_atomic_float16_a16_wr
ATOMIC_CRITICAL_WR( cmplx16, a16_wr, kmp_cmplx128_a16_t, =, 32c, 1 ) // __kmpc_atomic_cmplx16_a16_wr
#endif
#endif

// ------------------------------------------------------------------------
// Atomic CAPTURE routines
// ------------------------------------------------------------------------

// Beginning of a definition (provides name, parameters, debug trace)
// TYPE_ID - operands type and size (fixed*, fixed*u for signed, unsigned fixed)
// OP_ID - operation identifier (add, sub, mul, ...) 
// TYPE - operands' type #define ATOMIC_BEGIN_CPT(TYPE_ID,OP_ID,TYPE,RET_TYPE) \ RET_TYPE __kmpc_atomic_##TYPE_ID##_##OP_ID( ident_t *id_ref, int gtid, TYPE * lhs, TYPE rhs, int flag ) \ { \ KMP_DEBUG_ASSERT( __kmp_init_serial ); \ KA_TRACE(100,("__kmpc_atomic_" #TYPE_ID "_" #OP_ID ": T#%d\n", gtid )); // ------------------------------------------------------------------------- // Operation on *lhs, rhs bound by critical section // OP - operator (it's supposed to contain an assignment) // LCK_ID - lock identifier // Note: don't check gtid as it should always be valid // 1, 2-byte - expect valid parameter, other - check before this macro #define OP_CRITICAL_CPT(OP,LCK_ID) \ __kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \ \ if( flag ) { \ (*lhs) OP rhs; \ new_value = (*lhs); \ } else { \ new_value = (*lhs); \ (*lhs) OP rhs; \ } \ \ __kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \ return new_value; // ------------------------------------------------------------------------ #ifdef KMP_GOMP_COMPAT #define OP_GOMP_CRITICAL_CPT(OP,FLAG) \ if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \ KMP_CHECK_GTID; \ OP_CRITICAL_CPT( OP##=, 0 ); \ } #else #define OP_GOMP_CRITICAL_CPT(OP,FLAG) #endif /* KMP_GOMP_COMPAT */ // ------------------------------------------------------------------------ // Operation on *lhs, rhs using "compare_and_store" routine // TYPE - operands' type // BITS - size in bits, used to distinguish low level calls // OP - operator // Note: temp_val introduced in order to force the compiler to read // *lhs only once (w/o it the compiler reads *lhs twice) #define OP_CMPXCHG_CPT(TYPE,BITS,OP) \ { \ TYPE KMP_ATOMIC_VOLATILE temp_val; \ TYPE old_value, new_value; \ temp_val = *lhs; \ old_value = temp_val; \ new_value = old_value OP rhs; \ while ( ! 
KMP_COMPARE_AND_STORE_ACQ##BITS( (kmp_int##BITS *) lhs, \ *VOLATILE_CAST(kmp_int##BITS *) &old_value, \ *VOLATILE_CAST(kmp_int##BITS *) &new_value ) ) \ { \ KMP_CPU_PAUSE(); \ \ temp_val = *lhs; \ old_value = temp_val; \ new_value = old_value OP rhs; \ } \ if( flag ) { \ return new_value; \ } else \ return old_value; \ } // ------------------------------------------------------------------------- #define ATOMIC_CMPXCHG_CPT(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG) \ ATOMIC_BEGIN_CPT(TYPE_ID,OP_ID,TYPE,TYPE) \ TYPE new_value; \ OP_GOMP_CRITICAL_CPT(OP,GOMP_FLAG) \ OP_CMPXCHG_CPT(TYPE,BITS,OP) \ } // ------------------------------------------------------------------------- #define ATOMIC_FIXED_ADD_CPT(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG) \ ATOMIC_BEGIN_CPT(TYPE_ID,OP_ID,TYPE,TYPE) \ TYPE old_value, new_value; \ OP_GOMP_CRITICAL_CPT(OP,GOMP_FLAG) \ /* OP used as a sign for subtraction: (lhs-rhs) --> (lhs+-rhs) */ \ old_value = KMP_TEST_THEN_ADD##BITS( lhs, OP rhs ); \ if( flag ) { \ return old_value OP rhs; \ } else \ return old_value; \ } // ------------------------------------------------------------------------- ATOMIC_FIXED_ADD_CPT( fixed4, add_cpt, kmp_int32, 32, +, 0 ) // __kmpc_atomic_fixed4_add_cpt ATOMIC_FIXED_ADD_CPT( fixed4, sub_cpt, kmp_int32, 32, -, 0 ) // __kmpc_atomic_fixed4_sub_cpt ATOMIC_FIXED_ADD_CPT( fixed8, add_cpt, kmp_int64, 64, +, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_add_cpt ATOMIC_FIXED_ADD_CPT( fixed8, sub_cpt, kmp_int64, 64, -, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_sub_cpt ATOMIC_CMPXCHG_CPT( float4, add_cpt, kmp_real32, 32, +, KMP_ARCH_X86 ) // __kmpc_atomic_float4_add_cpt ATOMIC_CMPXCHG_CPT( float4, sub_cpt, kmp_real32, 32, -, KMP_ARCH_X86 ) // __kmpc_atomic_float4_sub_cpt ATOMIC_CMPXCHG_CPT( float8, add_cpt, kmp_real64, 64, +, KMP_ARCH_X86 ) // __kmpc_atomic_float8_add_cpt ATOMIC_CMPXCHG_CPT( float8, sub_cpt, kmp_real64, 64, -, KMP_ARCH_X86 ) // __kmpc_atomic_float8_sub_cpt // ------------------------------------------------------------------------ // Entries definition for integer operands // TYPE_ID - operands type and size (fixed4, float4) // OP_ID - operation identifier (add, sub, mul, ...) 
// TYPE - operand type // BITS - size in bits, used to distinguish low level calls // OP - operator (used in critical section) // TYPE_ID,OP_ID, TYPE, BITS,OP,GOMP_FLAG // ------------------------------------------------------------------------ // Routines for ATOMIC integer operands, other operators // ------------------------------------------------------------------------ // TYPE_ID,OP_ID, TYPE, OP, GOMP_FLAG ATOMIC_CMPXCHG_CPT( fixed1, add_cpt, kmp_int8, 8, +, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_add_cpt ATOMIC_CMPXCHG_CPT( fixed1, andb_cpt, kmp_int8, 8, &, 0 ) // __kmpc_atomic_fixed1_andb_cpt ATOMIC_CMPXCHG_CPT( fixed1, div_cpt, kmp_int8, 8, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_div_cpt ATOMIC_CMPXCHG_CPT( fixed1u, div_cpt, kmp_uint8, 8, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_div_cpt ATOMIC_CMPXCHG_CPT( fixed1, mul_cpt, kmp_int8, 8, *, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_mul_cpt ATOMIC_CMPXCHG_CPT( fixed1, orb_cpt, kmp_int8, 8, |, 0 ) // __kmpc_atomic_fixed1_orb_cpt ATOMIC_CMPXCHG_CPT( fixed1, shl_cpt, kmp_int8, 8, <<, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_shl_cpt ATOMIC_CMPXCHG_CPT( fixed1, shr_cpt, kmp_int8, 8, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_shr_cpt ATOMIC_CMPXCHG_CPT( fixed1u, shr_cpt, kmp_uint8, 8, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_shr_cpt ATOMIC_CMPXCHG_CPT( fixed1, sub_cpt, kmp_int8, 8, -, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_sub_cpt ATOMIC_CMPXCHG_CPT( fixed1, xor_cpt, kmp_int8, 8, ^, 0 ) // __kmpc_atomic_fixed1_xor_cpt ATOMIC_CMPXCHG_CPT( fixed2, add_cpt, kmp_int16, 16, +, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_add_cpt ATOMIC_CMPXCHG_CPT( fixed2, andb_cpt, kmp_int16, 16, &, 0 ) // __kmpc_atomic_fixed2_andb_cpt ATOMIC_CMPXCHG_CPT( fixed2, div_cpt, kmp_int16, 16, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_div_cpt ATOMIC_CMPXCHG_CPT( fixed2u, div_cpt, kmp_uint16, 16, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_div_cpt ATOMIC_CMPXCHG_CPT( fixed2, mul_cpt, kmp_int16, 16, *, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_mul_cpt ATOMIC_CMPXCHG_CPT( fixed2, orb_cpt, kmp_int16, 16, |, 0 ) // __kmpc_atomic_fixed2_orb_cpt ATOMIC_CMPXCHG_CPT( fixed2, shl_cpt, kmp_int16, 16, <<, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_shl_cpt ATOMIC_CMPXCHG_CPT( fixed2, shr_cpt, kmp_int16, 16, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_shr_cpt ATOMIC_CMPXCHG_CPT( fixed2u, shr_cpt, kmp_uint16, 16, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_shr_cpt ATOMIC_CMPXCHG_CPT( fixed2, sub_cpt, kmp_int16, 16, -, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_sub_cpt ATOMIC_CMPXCHG_CPT( fixed2, xor_cpt, kmp_int16, 16, ^, 0 ) // __kmpc_atomic_fixed2_xor_cpt ATOMIC_CMPXCHG_CPT( fixed4, andb_cpt, kmp_int32, 32, &, 0 ) // __kmpc_atomic_fixed4_andb_cpt ATOMIC_CMPXCHG_CPT( fixed4, div_cpt, kmp_int32, 32, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_div_cpt ATOMIC_CMPXCHG_CPT( fixed4u, div_cpt, kmp_uint32, 32, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4u_div_cpt ATOMIC_CMPXCHG_CPT( fixed4, mul_cpt, kmp_int32, 32, *, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_mul_cpt ATOMIC_CMPXCHG_CPT( fixed4, orb_cpt, kmp_int32, 32, |, 0 ) // __kmpc_atomic_fixed4_orb_cpt ATOMIC_CMPXCHG_CPT( fixed4, shl_cpt, kmp_int32, 32, <<, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_shl_cpt ATOMIC_CMPXCHG_CPT( fixed4, shr_cpt, kmp_int32, 32, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_shr_cpt ATOMIC_CMPXCHG_CPT( fixed4u, shr_cpt, kmp_uint32, 32, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4u_shr_cpt ATOMIC_CMPXCHG_CPT( fixed4, xor_cpt, kmp_int32, 32, ^, 0 ) // __kmpc_atomic_fixed4_xor_cpt ATOMIC_CMPXCHG_CPT( fixed8, andb_cpt, kmp_int64, 64, &, 
KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_andb_cpt ATOMIC_CMPXCHG_CPT( fixed8, div_cpt, kmp_int64, 64, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_div_cpt ATOMIC_CMPXCHG_CPT( fixed8u, div_cpt, kmp_uint64, 64, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_div_cpt ATOMIC_CMPXCHG_CPT( fixed8, mul_cpt, kmp_int64, 64, *, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_mul_cpt ATOMIC_CMPXCHG_CPT( fixed8, orb_cpt, kmp_int64, 64, |, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_orb_cpt ATOMIC_CMPXCHG_CPT( fixed8, shl_cpt, kmp_int64, 64, <<, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_shl_cpt ATOMIC_CMPXCHG_CPT( fixed8, shr_cpt, kmp_int64, 64, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_shr_cpt ATOMIC_CMPXCHG_CPT( fixed8u, shr_cpt, kmp_uint64, 64, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_shr_cpt ATOMIC_CMPXCHG_CPT( fixed8, xor_cpt, kmp_int64, 64, ^, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_xor_cpt ATOMIC_CMPXCHG_CPT( float4, div_cpt, kmp_real32, 32, /, KMP_ARCH_X86 ) // __kmpc_atomic_float4_div_cpt ATOMIC_CMPXCHG_CPT( float4, mul_cpt, kmp_real32, 32, *, KMP_ARCH_X86 ) // __kmpc_atomic_float4_mul_cpt ATOMIC_CMPXCHG_CPT( float8, div_cpt, kmp_real64, 64, /, KMP_ARCH_X86 ) // __kmpc_atomic_float8_div_cpt ATOMIC_CMPXCHG_CPT( float8, mul_cpt, kmp_real64, 64, *, KMP_ARCH_X86 ) // __kmpc_atomic_float8_mul_cpt // TYPE_ID,OP_ID, TYPE, OP, GOMP_FLAG // ------------------------------------------------------------------------ // Routines for C/C++ Reduction operators && and || // ------------------------------------------------------------------------ // ------------------------------------------------------------------------- // Operation on *lhs, rhs bound by critical section // OP - operator (it's supposed to contain an assignment) // LCK_ID - lock identifier // Note: don't check gtid as it should always be valid // 1, 2-byte - expect valid parameter, other - check before this macro #define OP_CRITICAL_L_CPT(OP,LCK_ID) \ __kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \ \ if( flag ) { \ new_value OP rhs; \ } else \ new_value = (*lhs); \ \ __kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); // ------------------------------------------------------------------------ #ifdef KMP_GOMP_COMPAT #define OP_GOMP_CRITICAL_L_CPT(OP,FLAG) \ if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \ KMP_CHECK_GTID; \ OP_CRITICAL_L_CPT( OP, 0 ); \ return new_value; \ } #else #define OP_GOMP_CRITICAL_L_CPT(OP,FLAG) #endif /* KMP_GOMP_COMPAT */ // ------------------------------------------------------------------------ // Need separate macros for &&, || because there is no combined assignment #define ATOMIC_CMPX_L_CPT(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG) \ ATOMIC_BEGIN_CPT(TYPE_ID,OP_ID,TYPE,TYPE) \ TYPE new_value; \ OP_GOMP_CRITICAL_L_CPT( = *lhs OP, GOMP_FLAG ) \ OP_CMPXCHG_CPT(TYPE,BITS,OP) \ } ATOMIC_CMPX_L_CPT( fixed1, andl_cpt, char, 8, &&, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_andl_cpt ATOMIC_CMPX_L_CPT( fixed1, orl_cpt, char, 8, ||, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_orl_cpt ATOMIC_CMPX_L_CPT( fixed2, andl_cpt, short, 16, &&, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_andl_cpt ATOMIC_CMPX_L_CPT( fixed2, orl_cpt, short, 16, ||, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_orl_cpt ATOMIC_CMPX_L_CPT( fixed4, andl_cpt, kmp_int32, 32, &&, 0 ) // __kmpc_atomic_fixed4_andl_cpt ATOMIC_CMPX_L_CPT( fixed4, orl_cpt, kmp_int32, 32, ||, 0 ) // __kmpc_atomic_fixed4_orl_cpt ATOMIC_CMPX_L_CPT( fixed8, andl_cpt, kmp_int64, 64, &&, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_andl_cpt ATOMIC_CMPX_L_CPT( fixed8, orl_cpt, kmp_int64, 64, ||, KMP_ARCH_X86 ) // 
__kmpc_atomic_fixed8_orl_cpt

// -------------------------------------------------------------------------
// Routines for Fortran operators that have no counterpart in C:
// MAX, MIN, .EQV., .NEQV.
// Operators .AND., .OR. are covered by __kmpc_atomic_*_{andl,orl}_cpt
// Intrinsics IAND, IOR, IEOR are covered by __kmpc_atomic_*_{andb,orb,xor}_cpt
// -------------------------------------------------------------------------

// -------------------------------------------------------------------------
// MIN and MAX need separate macros
// OP - operator used to check whether any action is still needed
#define MIN_MAX_CRITSECT_CPT(OP,LCK_ID) \
    __kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \
 \
    if ( *lhs OP rhs ) {                 /* still need actions? */ \
        old_value = *lhs; \
        *lhs = rhs; \
        if ( flag ) \
            new_value = rhs; \
        else \
            new_value = old_value; \
    } \
    __kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \
    return new_value; \

// -------------------------------------------------------------------------
#ifdef KMP_GOMP_COMPAT
#define GOMP_MIN_MAX_CRITSECT_CPT(OP,FLAG) \
    if (( FLAG ) && ( __kmp_atomic_mode == 2 )) { \
        KMP_CHECK_GTID; \
        MIN_MAX_CRITSECT_CPT( OP, 0 ); \
    }
#else
#define GOMP_MIN_MAX_CRITSECT_CPT(OP,FLAG)
#endif /* KMP_GOMP_COMPAT */

// -------------------------------------------------------------------------
#define MIN_MAX_CMPXCHG_CPT(TYPE,BITS,OP) \
    { \
        TYPE KMP_ATOMIC_VOLATILE temp_val; \
        /*TYPE old_value; */ \
        temp_val = *lhs; \
        old_value = temp_val; \
        while ( old_value OP rhs &&          /* still need actions? */ \
            ! KMP_COMPARE_AND_STORE_ACQ##BITS( (kmp_int##BITS *) lhs, \
                      *VOLATILE_CAST(kmp_int##BITS *) &old_value, \
                      *VOLATILE_CAST(kmp_int##BITS *) &rhs ) ) \
        { \
            KMP_CPU_PAUSE(); \
            temp_val = *lhs; \
            old_value = temp_val; \
        } \
        if( flag ) \
            return rhs; \
        else \
            return old_value; \
    }

// -------------------------------------------------------------------------
// 1-byte, 2-byte operands - use critical section
#define MIN_MAX_CRITICAL_CPT(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \
ATOMIC_BEGIN_CPT(TYPE_ID,OP_ID,TYPE,TYPE) \
    TYPE new_value, old_value; \
    if ( *lhs OP rhs ) {     /* need actions? 
*/ \
        GOMP_MIN_MAX_CRITSECT_CPT(OP,GOMP_FLAG) \
        MIN_MAX_CRITSECT_CPT(OP,LCK_ID) \
    } \
    return *lhs; \
}

#define MIN_MAX_COMPXCHG_CPT(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG) \
ATOMIC_BEGIN_CPT(TYPE_ID,OP_ID,TYPE,TYPE) \
    TYPE new_value, old_value; \
    if ( *lhs OP rhs ) { \
        GOMP_MIN_MAX_CRITSECT_CPT(OP,GOMP_FLAG) \
        MIN_MAX_CMPXCHG_CPT(TYPE,BITS,OP) \
    } \
    return *lhs; \
}

MIN_MAX_COMPXCHG_CPT( fixed1, max_cpt, char, 8, <, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_max_cpt
MIN_MAX_COMPXCHG_CPT( fixed1, min_cpt, char, 8, >, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_min_cpt
MIN_MAX_COMPXCHG_CPT( fixed2, max_cpt, short, 16, <, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_max_cpt
MIN_MAX_COMPXCHG_CPT( fixed2, min_cpt, short, 16, >, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_min_cpt
MIN_MAX_COMPXCHG_CPT( fixed4, max_cpt, kmp_int32, 32, <, 0 ) // __kmpc_atomic_fixed4_max_cpt
MIN_MAX_COMPXCHG_CPT( fixed4, min_cpt, kmp_int32, 32, >, 0 ) // __kmpc_atomic_fixed4_min_cpt
MIN_MAX_COMPXCHG_CPT( fixed8, max_cpt, kmp_int64, 64, <, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_max_cpt
MIN_MAX_COMPXCHG_CPT( fixed8, min_cpt, kmp_int64, 64, >, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_min_cpt
MIN_MAX_COMPXCHG_CPT( float4, max_cpt, kmp_real32, 32, <, KMP_ARCH_X86 ) // __kmpc_atomic_float4_max_cpt
MIN_MAX_COMPXCHG_CPT( float4, min_cpt, kmp_real32, 32, >, KMP_ARCH_X86 ) // __kmpc_atomic_float4_min_cpt
MIN_MAX_COMPXCHG_CPT( float8, max_cpt, kmp_real64, 64, <, KMP_ARCH_X86 ) // __kmpc_atomic_float8_max_cpt
MIN_MAX_COMPXCHG_CPT( float8, min_cpt, kmp_real64, 64, >, KMP_ARCH_X86 ) // __kmpc_atomic_float8_min_cpt
#if KMP_HAVE_QUAD
MIN_MAX_CRITICAL_CPT( float16, max_cpt, QUAD_LEGACY, <, 16r, 1 ) // __kmpc_atomic_float16_max_cpt
MIN_MAX_CRITICAL_CPT( float16, min_cpt, QUAD_LEGACY, >, 16r, 1 ) // __kmpc_atomic_float16_min_cpt
#if ( KMP_ARCH_X86 )
MIN_MAX_CRITICAL_CPT( float16, max_a16_cpt, Quad_a16_t, <, 16r, 1 ) // __kmpc_atomic_float16_max_a16_cpt
MIN_MAX_CRITICAL_CPT( float16, min_a16_cpt, Quad_a16_t, >, 16r, 1 ) // __kmpc_atomic_float16_min_a16_cpt
#endif
#endif

// ------------------------------------------------------------------------
#ifdef KMP_GOMP_COMPAT
#define OP_GOMP_CRITICAL_EQV_CPT(OP,FLAG) \
    if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \
        KMP_CHECK_GTID; \
        OP_CRITICAL_CPT( OP, 0 ); \
    }
#else
#define OP_GOMP_CRITICAL_EQV_CPT(OP,FLAG)
#endif /* KMP_GOMP_COMPAT */

// ------------------------------------------------------------------------
#define ATOMIC_CMPX_EQV_CPT(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG) \
ATOMIC_BEGIN_CPT(TYPE_ID,OP_ID,TYPE,TYPE) \
    TYPE new_value; \
    OP_GOMP_CRITICAL_EQV_CPT(^=~,GOMP_FLAG)  /* send assignment */ \
    OP_CMPXCHG_CPT(TYPE,BITS,OP) \
}

// ------------------------------------------------------------------------

ATOMIC_CMPXCHG_CPT( fixed1, neqv_cpt, kmp_int8, 8, ^, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_neqv_cpt
ATOMIC_CMPXCHG_CPT( fixed2, neqv_cpt, kmp_int16, 16, ^, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_neqv_cpt
ATOMIC_CMPXCHG_CPT( fixed4, neqv_cpt, kmp_int32, 32, ^, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_neqv_cpt
ATOMIC_CMPXCHG_CPT( fixed8, neqv_cpt, kmp_int64, 64, ^, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_neqv_cpt
ATOMIC_CMPX_EQV_CPT( fixed1, eqv_cpt, kmp_int8, 8, ^~, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_eqv_cpt
ATOMIC_CMPX_EQV_CPT( fixed2, eqv_cpt, kmp_int16, 16, ^~, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_eqv_cpt
ATOMIC_CMPX_EQV_CPT( fixed4, eqv_cpt, kmp_int32, 32, ^~, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_eqv_cpt
ATOMIC_CMPX_EQV_CPT( fixed8, eqv_cpt, kmp_int64, 64, ^~, KMP_ARCH_X86 ) // 
__kmpc_atomic_fixed8_eqv_cpt // ------------------------------------------------------------------------ // Routines for Extended types: long double, _Quad, complex flavours (use critical section) // TYPE_ID, OP_ID, TYPE - detailed above // OP - operator // LCK_ID - lock identifier, used to possibly distinguish lock variable #define ATOMIC_CRITICAL_CPT(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \ ATOMIC_BEGIN_CPT(TYPE_ID,OP_ID,TYPE,TYPE) \ TYPE new_value; \ OP_GOMP_CRITICAL_CPT(OP,GOMP_FLAG) /* send assignment */ \ OP_CRITICAL_CPT(OP##=,LCK_ID) /* send assignment */ \ } // ------------------------------------------------------------------------ // Workaround for cmplx4. Regular routines with return value don't work // on Win_32e. Let's return captured values through the additional parameter. #define OP_CRITICAL_CPT_WRK(OP,LCK_ID) \ __kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \ \ if( flag ) { \ (*lhs) OP rhs; \ (*out) = (*lhs); \ } else { \ (*out) = (*lhs); \ (*lhs) OP rhs; \ } \ \ __kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \ return; // ------------------------------------------------------------------------ #ifdef KMP_GOMP_COMPAT #define OP_GOMP_CRITICAL_CPT_WRK(OP,FLAG) \ if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \ KMP_CHECK_GTID; \ OP_CRITICAL_CPT_WRK( OP##=, 0 ); \ } #else #define OP_GOMP_CRITICAL_CPT_WRK(OP,FLAG) #endif /* KMP_GOMP_COMPAT */ // ------------------------------------------------------------------------ #define ATOMIC_BEGIN_WRK(TYPE_ID,OP_ID,TYPE) \ void __kmpc_atomic_##TYPE_ID##_##OP_ID( ident_t *id_ref, int gtid, TYPE * lhs, TYPE rhs, TYPE * out, int flag ) \ { \ KMP_DEBUG_ASSERT( __kmp_init_serial ); \ KA_TRACE(100,("__kmpc_atomic_" #TYPE_ID "_" #OP_ID ": T#%d\n", gtid )); // ------------------------------------------------------------------------ #define ATOMIC_CRITICAL_CPT_WRK(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \ ATOMIC_BEGIN_WRK(TYPE_ID,OP_ID,TYPE) \ OP_GOMP_CRITICAL_CPT_WRK(OP,GOMP_FLAG) \ OP_CRITICAL_CPT_WRK(OP##=,LCK_ID) \ } // The end of workaround for cmplx4 /* ------------------------------------------------------------------------- */ // routines for long double type ATOMIC_CRITICAL_CPT( float10, add_cpt, long double, +, 10r, 1 ) // __kmpc_atomic_float10_add_cpt ATOMIC_CRITICAL_CPT( float10, sub_cpt, long double, -, 10r, 1 ) // __kmpc_atomic_float10_sub_cpt ATOMIC_CRITICAL_CPT( float10, mul_cpt, long double, *, 10r, 1 ) // __kmpc_atomic_float10_mul_cpt ATOMIC_CRITICAL_CPT( float10, div_cpt, long double, /, 10r, 1 ) // __kmpc_atomic_float10_div_cpt #if KMP_HAVE_QUAD // routines for _Quad type ATOMIC_CRITICAL_CPT( float16, add_cpt, QUAD_LEGACY, +, 16r, 1 ) // __kmpc_atomic_float16_add_cpt ATOMIC_CRITICAL_CPT( float16, sub_cpt, QUAD_LEGACY, -, 16r, 1 ) // __kmpc_atomic_float16_sub_cpt ATOMIC_CRITICAL_CPT( float16, mul_cpt, QUAD_LEGACY, *, 16r, 1 ) // __kmpc_atomic_float16_mul_cpt ATOMIC_CRITICAL_CPT( float16, div_cpt, QUAD_LEGACY, /, 16r, 1 ) // __kmpc_atomic_float16_div_cpt #if ( KMP_ARCH_X86 ) ATOMIC_CRITICAL_CPT( float16, add_a16_cpt, Quad_a16_t, +, 16r, 1 ) // __kmpc_atomic_float16_add_a16_cpt ATOMIC_CRITICAL_CPT( float16, sub_a16_cpt, Quad_a16_t, -, 16r, 1 ) // __kmpc_atomic_float16_sub_a16_cpt ATOMIC_CRITICAL_CPT( float16, mul_a16_cpt, Quad_a16_t, *, 16r, 1 ) // __kmpc_atomic_float16_mul_a16_cpt ATOMIC_CRITICAL_CPT( float16, div_a16_cpt, Quad_a16_t, /, 16r, 1 ) // __kmpc_atomic_float16_div_a16_cpt #endif #endif // routines for complex types // cmplx4 routines to return void ATOMIC_CRITICAL_CPT_WRK( cmplx4, add_cpt, 
kmp_cmplx32, +, 8c, 1 ) // __kmpc_atomic_cmplx4_add_cpt ATOMIC_CRITICAL_CPT_WRK( cmplx4, sub_cpt, kmp_cmplx32, -, 8c, 1 ) // __kmpc_atomic_cmplx4_sub_cpt ATOMIC_CRITICAL_CPT_WRK( cmplx4, mul_cpt, kmp_cmplx32, *, 8c, 1 ) // __kmpc_atomic_cmplx4_mul_cpt ATOMIC_CRITICAL_CPT_WRK( cmplx4, div_cpt, kmp_cmplx32, /, 8c, 1 ) // __kmpc_atomic_cmplx4_div_cpt ATOMIC_CRITICAL_CPT( cmplx8, add_cpt, kmp_cmplx64, +, 16c, 1 ) // __kmpc_atomic_cmplx8_add_cpt ATOMIC_CRITICAL_CPT( cmplx8, sub_cpt, kmp_cmplx64, -, 16c, 1 ) // __kmpc_atomic_cmplx8_sub_cpt ATOMIC_CRITICAL_CPT( cmplx8, mul_cpt, kmp_cmplx64, *, 16c, 1 ) // __kmpc_atomic_cmplx8_mul_cpt ATOMIC_CRITICAL_CPT( cmplx8, div_cpt, kmp_cmplx64, /, 16c, 1 ) // __kmpc_atomic_cmplx8_div_cpt ATOMIC_CRITICAL_CPT( cmplx10, add_cpt, kmp_cmplx80, +, 20c, 1 ) // __kmpc_atomic_cmplx10_add_cpt ATOMIC_CRITICAL_CPT( cmplx10, sub_cpt, kmp_cmplx80, -, 20c, 1 ) // __kmpc_atomic_cmplx10_sub_cpt ATOMIC_CRITICAL_CPT( cmplx10, mul_cpt, kmp_cmplx80, *, 20c, 1 ) // __kmpc_atomic_cmplx10_mul_cpt ATOMIC_CRITICAL_CPT( cmplx10, div_cpt, kmp_cmplx80, /, 20c, 1 ) // __kmpc_atomic_cmplx10_div_cpt #if KMP_HAVE_QUAD ATOMIC_CRITICAL_CPT( cmplx16, add_cpt, CPLX128_LEG, +, 32c, 1 ) // __kmpc_atomic_cmplx16_add_cpt ATOMIC_CRITICAL_CPT( cmplx16, sub_cpt, CPLX128_LEG, -, 32c, 1 ) // __kmpc_atomic_cmplx16_sub_cpt ATOMIC_CRITICAL_CPT( cmplx16, mul_cpt, CPLX128_LEG, *, 32c, 1 ) // __kmpc_atomic_cmplx16_mul_cpt ATOMIC_CRITICAL_CPT( cmplx16, div_cpt, CPLX128_LEG, /, 32c, 1 ) // __kmpc_atomic_cmplx16_div_cpt #if ( KMP_ARCH_X86 ) ATOMIC_CRITICAL_CPT( cmplx16, add_a16_cpt, kmp_cmplx128_a16_t, +, 32c, 1 ) // __kmpc_atomic_cmplx16_add_a16_cpt ATOMIC_CRITICAL_CPT( cmplx16, sub_a16_cpt, kmp_cmplx128_a16_t, -, 32c, 1 ) // __kmpc_atomic_cmplx16_sub_a16_cpt ATOMIC_CRITICAL_CPT( cmplx16, mul_a16_cpt, kmp_cmplx128_a16_t, *, 32c, 1 ) // __kmpc_atomic_cmplx16_mul_a16_cpt ATOMIC_CRITICAL_CPT( cmplx16, div_a16_cpt, kmp_cmplx128_a16_t, /, 32c, 1 ) // __kmpc_atomic_cmplx16_div_a16_cpt #endif #endif #if OMP_40_ENABLED // OpenMP 4.0: v = x = expr binop x; { v = x; x = expr binop x; } { x = expr binop x; v = x; } for non-commutative operations. 
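// (A note derived from OP_CRITICAL_CPT_REV / OP_CMPXCHG_CPT_REV below: the
// "flag" argument of the capture routines selects between the two forms
// above -- flag != 0 captures the new value of x into v, flag == 0 the old one.)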
// Supported only on IA-32 architecture and Intel(R) 64.

// -------------------------------------------------------------------------
// Operation on *lhs, rhs bound by critical section
// OP     - operator (it's supposed to contain an assignment)
// LCK_ID - lock identifier
// Note: don't check gtid as it should always be valid
// 1, 2-byte - expect valid parameter, other - check before this macro
#define OP_CRITICAL_CPT_REV(OP,LCK_ID) \
    __kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \
 \
    if( flag ) { \
        /*temp_val = (*lhs);*/ \
        (*lhs) = (rhs) OP (*lhs); \
        new_value = (*lhs); \
    } else { \
        new_value = (*lhs); \
        (*lhs) = (rhs) OP (*lhs); \
    } \
    __kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \
    return new_value;

// ------------------------------------------------------------------------
#ifdef KMP_GOMP_COMPAT
#define OP_GOMP_CRITICAL_CPT_REV(OP,FLAG) \
    if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \
        KMP_CHECK_GTID; \
        OP_CRITICAL_CPT_REV( OP, 0 ); \
    }
#else
#define OP_GOMP_CRITICAL_CPT_REV(OP,FLAG)
#endif /* KMP_GOMP_COMPAT */

// ------------------------------------------------------------------------
// Operation on *lhs, rhs using "compare_and_store" routine
// TYPE - operands' type
// BITS - size in bits, used to distinguish low level calls
// OP - operator
// Note: temp_val introduced in order to force the compiler to read
//       *lhs only once (w/o it the compiler reads *lhs twice)
#define OP_CMPXCHG_CPT_REV(TYPE,BITS,OP) \
    { \
        TYPE KMP_ATOMIC_VOLATILE temp_val; \
        TYPE old_value, new_value; \
        temp_val = *lhs; \
        old_value = temp_val; \
        new_value = rhs OP old_value; \
        while ( ! KMP_COMPARE_AND_STORE_ACQ##BITS( (kmp_int##BITS *) lhs, \
                      *VOLATILE_CAST(kmp_int##BITS *) &old_value, \
                      *VOLATILE_CAST(kmp_int##BITS *) &new_value ) ) \
        { \
            KMP_CPU_PAUSE(); \
 \
            temp_val = *lhs; \
            old_value = temp_val; \
            new_value = rhs OP old_value; \
        } \
        if( flag ) { \
            return new_value; \
        } else \
            return old_value; \
    }

// -------------------------------------------------------------------------
#define ATOMIC_CMPXCHG_CPT_REV(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG) \
ATOMIC_BEGIN_CPT(TYPE_ID,OP_ID,TYPE,TYPE) \
    TYPE new_value; \
    TYPE KMP_ATOMIC_VOLATILE temp_val; \
    OP_GOMP_CRITICAL_CPT_REV(OP,GOMP_FLAG) \
    OP_CMPXCHG_CPT_REV(TYPE,BITS,OP) \
}

ATOMIC_CMPXCHG_CPT_REV( fixed1, div_cpt_rev, kmp_int8, 8, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_div_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed1u, div_cpt_rev, kmp_uint8, 8, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_div_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed1, shl_cpt_rev, kmp_int8, 8, <<, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_shl_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed1, shr_cpt_rev, kmp_int8, 8, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_shr_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed1u, shr_cpt_rev, kmp_uint8, 8, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_shr_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed1, sub_cpt_rev, kmp_int8, 8, -, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_sub_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed2, div_cpt_rev, kmp_int16, 16, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_div_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed2u, div_cpt_rev, kmp_uint16, 16, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_div_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed2, shl_cpt_rev, kmp_int16, 16, <<, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_shl_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed2, shr_cpt_rev, kmp_int16, 16, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_shr_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed2u, shr_cpt_rev, kmp_uint16, 16, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_shr_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed2, 
sub_cpt_rev, kmp_int16, 16, -, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_sub_cpt_rev ATOMIC_CMPXCHG_CPT_REV( fixed4, div_cpt_rev, kmp_int32, 32, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_div_cpt_rev ATOMIC_CMPXCHG_CPT_REV( fixed4u, div_cpt_rev, kmp_uint32, 32, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4u_div_cpt_rev ATOMIC_CMPXCHG_CPT_REV( fixed4, shl_cpt_rev, kmp_int32, 32, <<, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_shl_cpt_rev ATOMIC_CMPXCHG_CPT_REV( fixed4, shr_cpt_rev, kmp_int32, 32, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_shr_cpt_rev ATOMIC_CMPXCHG_CPT_REV( fixed4u, shr_cpt_rev, kmp_uint32, 32, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4u_shr_cpt_rev ATOMIC_CMPXCHG_CPT_REV( fixed4, sub_cpt_rev, kmp_int32, 32, -, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_sub_cpt_rev ATOMIC_CMPXCHG_CPT_REV( fixed8, div_cpt_rev, kmp_int64, 64, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_div_cpt_rev ATOMIC_CMPXCHG_CPT_REV( fixed8u, div_cpt_rev, kmp_uint64, 64, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_div_cpt_rev ATOMIC_CMPXCHG_CPT_REV( fixed8, shl_cpt_rev, kmp_int64, 64, <<, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_shl_cpt_rev ATOMIC_CMPXCHG_CPT_REV( fixed8, shr_cpt_rev, kmp_int64, 64, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_shr_cpt_rev ATOMIC_CMPXCHG_CPT_REV( fixed8u, shr_cpt_rev, kmp_uint64, 64, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_shr_cpt_rev ATOMIC_CMPXCHG_CPT_REV( fixed8, sub_cpt_rev, kmp_int64, 64, -, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_sub_cpt_rev ATOMIC_CMPXCHG_CPT_REV( float4, div_cpt_rev, kmp_real32, 32, /, KMP_ARCH_X86 ) // __kmpc_atomic_float4_div_cpt_rev ATOMIC_CMPXCHG_CPT_REV( float4, sub_cpt_rev, kmp_real32, 32, -, KMP_ARCH_X86 ) // __kmpc_atomic_float4_sub_cpt_rev ATOMIC_CMPXCHG_CPT_REV( float8, div_cpt_rev, kmp_real64, 64, /, KMP_ARCH_X86 ) // __kmpc_atomic_float8_div_cpt_rev ATOMIC_CMPXCHG_CPT_REV( float8, sub_cpt_rev, kmp_real64, 64, -, KMP_ARCH_X86 ) // __kmpc_atomic_float8_sub_cpt_rev // TYPE_ID,OP_ID, TYPE, OP, GOMP_FLAG // ------------------------------------------------------------------------ // Routines for Extended types: long double, _Quad, complex flavours (use critical section) // TYPE_ID, OP_ID, TYPE - detailed above // OP - operator // LCK_ID - lock identifier, used to possibly distinguish lock variable #define ATOMIC_CRITICAL_CPT_REV(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \ ATOMIC_BEGIN_CPT(TYPE_ID,OP_ID,TYPE,TYPE) \ TYPE new_value; \ TYPE KMP_ATOMIC_VOLATILE temp_val; \ /*printf("__kmp_atomic_mode = %d\n", __kmp_atomic_mode);*/\ OP_GOMP_CRITICAL_CPT_REV(OP,GOMP_FLAG) \ OP_CRITICAL_CPT_REV(OP,LCK_ID) \ } /* ------------------------------------------------------------------------- */ // routines for long double type ATOMIC_CRITICAL_CPT_REV( float10, sub_cpt_rev, long double, -, 10r, 1 ) // __kmpc_atomic_float10_sub_cpt_rev ATOMIC_CRITICAL_CPT_REV( float10, div_cpt_rev, long double, /, 10r, 1 ) // __kmpc_atomic_float10_div_cpt_rev #if KMP_HAVE_QUAD // routines for _Quad type ATOMIC_CRITICAL_CPT_REV( float16, sub_cpt_rev, QUAD_LEGACY, -, 16r, 1 ) // __kmpc_atomic_float16_sub_cpt_rev ATOMIC_CRITICAL_CPT_REV( float16, div_cpt_rev, QUAD_LEGACY, /, 16r, 1 ) // __kmpc_atomic_float16_div_cpt_rev #if ( KMP_ARCH_X86 ) ATOMIC_CRITICAL_CPT_REV( float16, sub_a16_cpt_rev, Quad_a16_t, -, 16r, 1 ) // __kmpc_atomic_float16_sub_a16_cpt_rev ATOMIC_CRITICAL_CPT_REV( float16, div_a16_cpt_rev, Quad_a16_t, /, 16r, 1 ) // __kmpc_atomic_float16_div_a16_cpt_rev #endif #endif // routines for complex types // ------------------------------------------------------------------------ // Workaround 
for cmplx4. Regular routines with return value don't work // on Win_32e. Let's return captured values through the additional parameter. #define OP_CRITICAL_CPT_REV_WRK(OP,LCK_ID) \ __kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \ \ if( flag ) { \ (*lhs) = (rhs) OP (*lhs); \ (*out) = (*lhs); \ } else { \ (*out) = (*lhs); \ (*lhs) = (rhs) OP (*lhs); \ } \ \ __kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \ return; // ------------------------------------------------------------------------ #ifdef KMP_GOMP_COMPAT #define OP_GOMP_CRITICAL_CPT_REV_WRK(OP,FLAG) \ if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \ KMP_CHECK_GTID; \ OP_CRITICAL_CPT_REV_WRK( OP, 0 ); \ } #else #define OP_GOMP_CRITICAL_CPT_REV_WRK(OP,FLAG) #endif /* KMP_GOMP_COMPAT */ // ------------------------------------------------------------------------ #define ATOMIC_CRITICAL_CPT_REV_WRK(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \ ATOMIC_BEGIN_WRK(TYPE_ID,OP_ID,TYPE) \ OP_GOMP_CRITICAL_CPT_REV_WRK(OP,GOMP_FLAG) \ OP_CRITICAL_CPT_REV_WRK(OP,LCK_ID) \ } // The end of workaround for cmplx4 // !!! TODO: check if we need to return void for cmplx4 routines // cmplx4 routines to return void ATOMIC_CRITICAL_CPT_REV_WRK( cmplx4, sub_cpt_rev, kmp_cmplx32, -, 8c, 1 ) // __kmpc_atomic_cmplx4_sub_cpt_rev ATOMIC_CRITICAL_CPT_REV_WRK( cmplx4, div_cpt_rev, kmp_cmplx32, /, 8c, 1 ) // __kmpc_atomic_cmplx4_div_cpt_rev ATOMIC_CRITICAL_CPT_REV( cmplx8, sub_cpt_rev, kmp_cmplx64, -, 16c, 1 ) // __kmpc_atomic_cmplx8_sub_cpt_rev ATOMIC_CRITICAL_CPT_REV( cmplx8, div_cpt_rev, kmp_cmplx64, /, 16c, 1 ) // __kmpc_atomic_cmplx8_div_cpt_rev ATOMIC_CRITICAL_CPT_REV( cmplx10, sub_cpt_rev, kmp_cmplx80, -, 20c, 1 ) // __kmpc_atomic_cmplx10_sub_cpt_rev ATOMIC_CRITICAL_CPT_REV( cmplx10, div_cpt_rev, kmp_cmplx80, /, 20c, 1 ) // __kmpc_atomic_cmplx10_div_cpt_rev #if KMP_HAVE_QUAD ATOMIC_CRITICAL_CPT_REV( cmplx16, sub_cpt_rev, CPLX128_LEG, -, 32c, 1 ) // __kmpc_atomic_cmplx16_sub_cpt_rev ATOMIC_CRITICAL_CPT_REV( cmplx16, div_cpt_rev, CPLX128_LEG, /, 32c, 1 ) // __kmpc_atomic_cmplx16_div_cpt_rev #if ( KMP_ARCH_X86 ) ATOMIC_CRITICAL_CPT_REV( cmplx16, sub_a16_cpt_rev, kmp_cmplx128_a16_t, -, 32c, 1 ) // __kmpc_atomic_cmplx16_sub_a16_cpt_rev ATOMIC_CRITICAL_CPT_REV( cmplx16, div_a16_cpt_rev, kmp_cmplx128_a16_t, /, 32c, 1 ) // __kmpc_atomic_cmplx16_div_a16_cpt_rev #endif #endif // OpenMP 4.0 Capture-write (swap): {v = x; x = expr;} #define ATOMIC_BEGIN_SWP(TYPE_ID,TYPE) \ TYPE __kmpc_atomic_##TYPE_ID##_swp( ident_t *id_ref, int gtid, TYPE * lhs, TYPE rhs ) \ { \ KMP_DEBUG_ASSERT( __kmp_init_serial ); \ KA_TRACE(100,("__kmpc_atomic_" #TYPE_ID "_swp: T#%d\n", gtid )); #define CRITICAL_SWP(LCK_ID) \ __kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \ \ old_value = (*lhs); \ (*lhs) = rhs; \ \ __kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \ return old_value; // ------------------------------------------------------------------------ #ifdef KMP_GOMP_COMPAT #define GOMP_CRITICAL_SWP(FLAG) \ if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \ KMP_CHECK_GTID; \ CRITICAL_SWP( 0 ); \ } #else #define GOMP_CRITICAL_SWP(FLAG) #endif /* KMP_GOMP_COMPAT */ #define ATOMIC_XCHG_SWP(TYPE_ID,TYPE,BITS,GOMP_FLAG) \ ATOMIC_BEGIN_SWP(TYPE_ID,TYPE) \ TYPE old_value; \ GOMP_CRITICAL_SWP(GOMP_FLAG) \ old_value = KMP_XCHG_FIXED##BITS( lhs, rhs ); \ return old_value; \ } // ------------------------------------------------------------------------ #define ATOMIC_XCHG_FLOAT_SWP(TYPE_ID,TYPE,BITS,GOMP_FLAG) \ ATOMIC_BEGIN_SWP(TYPE_ID,TYPE) \ TYPE old_value; \ 
GOMP_CRITICAL_SWP(GOMP_FLAG) \ old_value = KMP_XCHG_REAL##BITS( lhs, rhs ); \ return old_value; \ } // ------------------------------------------------------------------------ #define CMPXCHG_SWP(TYPE,BITS) \ { \ TYPE KMP_ATOMIC_VOLATILE temp_val; \ TYPE old_value, new_value; \ temp_val = *lhs; \ old_value = temp_val; \ new_value = rhs; \ while ( ! KMP_COMPARE_AND_STORE_ACQ##BITS( (kmp_int##BITS *) lhs, \ *VOLATILE_CAST(kmp_int##BITS *) &old_value, \ *VOLATILE_CAST(kmp_int##BITS *) &new_value ) ) \ { \ KMP_CPU_PAUSE(); \ \ temp_val = *lhs; \ old_value = temp_val; \ new_value = rhs; \ } \ return old_value; \ } // ------------------------------------------------------------------------- #define ATOMIC_CMPXCHG_SWP(TYPE_ID,TYPE,BITS,GOMP_FLAG) \ ATOMIC_BEGIN_SWP(TYPE_ID,TYPE) \ TYPE old_value; \ GOMP_CRITICAL_SWP(GOMP_FLAG) \ CMPXCHG_SWP(TYPE,BITS) \ } ATOMIC_XCHG_SWP( fixed1, kmp_int8, 8, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_swp ATOMIC_XCHG_SWP( fixed2, kmp_int16, 16, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_swp ATOMIC_XCHG_SWP( fixed4, kmp_int32, 32, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_swp ATOMIC_XCHG_FLOAT_SWP( float4, kmp_real32, 32, KMP_ARCH_X86 ) // __kmpc_atomic_float4_swp #if ( KMP_ARCH_X86 ) ATOMIC_CMPXCHG_SWP( fixed8, kmp_int64, 64, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_swp ATOMIC_CMPXCHG_SWP( float8, kmp_real64, 64, KMP_ARCH_X86 ) // __kmpc_atomic_float8_swp #else ATOMIC_XCHG_SWP( fixed8, kmp_int64, 64, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_swp ATOMIC_XCHG_FLOAT_SWP( float8, kmp_real64, 64, KMP_ARCH_X86 ) // __kmpc_atomic_float8_swp #endif // ------------------------------------------------------------------------ // Routines for Extended types: long double, _Quad, complex flavours (use critical section) #define ATOMIC_CRITICAL_SWP(TYPE_ID,TYPE,LCK_ID,GOMP_FLAG) \ ATOMIC_BEGIN_SWP(TYPE_ID,TYPE) \ TYPE old_value; \ GOMP_CRITICAL_SWP(GOMP_FLAG) \ CRITICAL_SWP(LCK_ID) \ } // ------------------------------------------------------------------------ // !!! TODO: check if we need to return void for cmplx4 routines // Workaround for cmplx4. Regular routines with return value don't work // on Win_32e. Let's return captured values through the additional parameter. 
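// Illustrative sketch (comment only; "x", "v" and "old" are hypothetical
// kmp_cmplx32 variables used for illustration): with this workaround the
// cmplx4 swap defined below gets the signature
//     void __kmpc_atomic_cmplx4_swp( ident_t *, int, kmp_cmplx32 * lhs, kmp_cmplx32 rhs, kmp_cmplx32 * out );
// so instead of
//     old = __kmpc_atomic_cmplx4_swp( id_ref, gtid, &x, v );
// a caller on Windows* OS passes the capture location explicitly:
//     __kmpc_atomic_cmplx4_swp( id_ref, gtid, &x, v, &old );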
#define ATOMIC_BEGIN_SWP_WRK(TYPE_ID,TYPE) \ void __kmpc_atomic_##TYPE_ID##_swp( ident_t *id_ref, int gtid, TYPE * lhs, TYPE rhs, TYPE * out ) \ { \ KMP_DEBUG_ASSERT( __kmp_init_serial ); \ KA_TRACE(100,("__kmpc_atomic_" #TYPE_ID "_swp: T#%d\n", gtid )); #define CRITICAL_SWP_WRK(LCK_ID) \ __kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \ \ tmp = (*lhs); \ (*lhs) = (rhs); \ (*out) = tmp; \ __kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \ return; // ------------------------------------------------------------------------ #ifdef KMP_GOMP_COMPAT #define GOMP_CRITICAL_SWP_WRK(FLAG) \ if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \ KMP_CHECK_GTID; \ CRITICAL_SWP_WRK( 0 ); \ } #else #define GOMP_CRITICAL_SWP_WRK(FLAG) #endif /* KMP_GOMP_COMPAT */ // ------------------------------------------------------------------------ #define ATOMIC_CRITICAL_SWP_WRK(TYPE_ID, TYPE,LCK_ID,GOMP_FLAG) \ ATOMIC_BEGIN_SWP_WRK(TYPE_ID,TYPE) \ TYPE tmp; \ GOMP_CRITICAL_SWP_WRK(GOMP_FLAG) \ CRITICAL_SWP_WRK(LCK_ID) \ } // The end of workaround for cmplx4 ATOMIC_CRITICAL_SWP( float10, long double, 10r, 1 ) // __kmpc_atomic_float10_swp #if KMP_HAVE_QUAD ATOMIC_CRITICAL_SWP( float16, QUAD_LEGACY, 16r, 1 ) // __kmpc_atomic_float16_swp #endif // cmplx4 routine to return void ATOMIC_CRITICAL_SWP_WRK( cmplx4, kmp_cmplx32, 8c, 1 ) // __kmpc_atomic_cmplx4_swp //ATOMIC_CRITICAL_SWP( cmplx4, kmp_cmplx32, 8c, 1 ) // __kmpc_atomic_cmplx4_swp ATOMIC_CRITICAL_SWP( cmplx8, kmp_cmplx64, 16c, 1 ) // __kmpc_atomic_cmplx8_swp ATOMIC_CRITICAL_SWP( cmplx10, kmp_cmplx80, 20c, 1 ) // __kmpc_atomic_cmplx10_swp #if KMP_HAVE_QUAD ATOMIC_CRITICAL_SWP( cmplx16, CPLX128_LEG, 32c, 1 ) // __kmpc_atomic_cmplx16_swp #if ( KMP_ARCH_X86 ) ATOMIC_CRITICAL_SWP( float16_a16, Quad_a16_t, 16r, 1 ) // __kmpc_atomic_float16_a16_swp ATOMIC_CRITICAL_SWP( cmplx16_a16, kmp_cmplx128_a16_t, 32c, 1 ) // __kmpc_atomic_cmplx16_a16_swp #endif #endif // End of OpenMP 4.0 Capture #endif //OMP_40_ENABLED #endif //KMP_ARCH_X86 || KMP_ARCH_X86_64 #undef OP_CRITICAL /* ------------------------------------------------------------------------ */ /* Generic atomic routines */ /* ------------------------------------------------------------------------ */ void __kmpc_atomic_1( ident_t *id_ref, int gtid, void* lhs, void* rhs, void (*f)( void *, void *, void * ) ) { KMP_DEBUG_ASSERT( __kmp_init_serial ); if ( #if KMP_ARCH_X86 && defined(KMP_GOMP_COMPAT) FALSE /* must use lock */ #else TRUE #endif ) { kmp_int8 old_value, new_value; old_value = *(kmp_int8 *) lhs; (*f)( &new_value, &old_value, rhs ); /* TODO: Should this be acquire or release? */ while ( ! KMP_COMPARE_AND_STORE_ACQ8 ( (kmp_int8 *) lhs, *(kmp_int8 *) &old_value, *(kmp_int8 *) &new_value ) ) { KMP_CPU_PAUSE(); old_value = *(kmp_int8 *) lhs; (*f)( &new_value, &old_value, rhs ); } return; } else { // // All 1-byte data is of integer data type. 
// #ifdef KMP_GOMP_COMPAT if ( __kmp_atomic_mode == 2 ) { __kmp_acquire_atomic_lock( & __kmp_atomic_lock, gtid ); } else #endif /* KMP_GOMP_COMPAT */ __kmp_acquire_atomic_lock( & __kmp_atomic_lock_1i, gtid ); (*f)( lhs, lhs, rhs ); #ifdef KMP_GOMP_COMPAT if ( __kmp_atomic_mode == 2 ) { __kmp_release_atomic_lock( & __kmp_atomic_lock, gtid ); } else #endif /* KMP_GOMP_COMPAT */ __kmp_release_atomic_lock( & __kmp_atomic_lock_1i, gtid ); } } void __kmpc_atomic_2( ident_t *id_ref, int gtid, void* lhs, void* rhs, void (*f)( void *, void *, void * ) ) { if ( #if KMP_ARCH_X86 && defined(KMP_GOMP_COMPAT) FALSE /* must use lock */ #elif KMP_ARCH_X86 || KMP_ARCH_X86_64 TRUE /* no alignment problems */ #else ! ( (kmp_uintptr_t) lhs & 0x1) /* make sure address is 2-byte aligned */ #endif ) { kmp_int16 old_value, new_value; old_value = *(kmp_int16 *) lhs; (*f)( &new_value, &old_value, rhs ); /* TODO: Should this be acquire or release? */ while ( ! KMP_COMPARE_AND_STORE_ACQ16 ( (kmp_int16 *) lhs, *(kmp_int16 *) &old_value, *(kmp_int16 *) &new_value ) ) { KMP_CPU_PAUSE(); old_value = *(kmp_int16 *) lhs; (*f)( &new_value, &old_value, rhs ); } return; } else { // // All 2-byte data is of integer data type. // #ifdef KMP_GOMP_COMPAT if ( __kmp_atomic_mode == 2 ) { __kmp_acquire_atomic_lock( & __kmp_atomic_lock, gtid ); } else #endif /* KMP_GOMP_COMPAT */ __kmp_acquire_atomic_lock( & __kmp_atomic_lock_2i, gtid ); (*f)( lhs, lhs, rhs ); #ifdef KMP_GOMP_COMPAT if ( __kmp_atomic_mode == 2 ) { __kmp_release_atomic_lock( & __kmp_atomic_lock, gtid ); } else #endif /* KMP_GOMP_COMPAT */ __kmp_release_atomic_lock( & __kmp_atomic_lock_2i, gtid ); } } void __kmpc_atomic_4( ident_t *id_ref, int gtid, void* lhs, void* rhs, void (*f)( void *, void *, void * ) ) { KMP_DEBUG_ASSERT( __kmp_init_serial ); if ( // // FIXME: On IA-32 architecture, gcc uses cmpxchg only for 4-byte ints. // Gomp compatibility is broken if this routine is called for floats. // #if KMP_ARCH_X86 || KMP_ARCH_X86_64 TRUE /* no alignment problems */ #else ! ( (kmp_uintptr_t) lhs & 0x3) /* make sure address is 4-byte aligned */ #endif ) { kmp_int32 old_value, new_value; old_value = *(kmp_int32 *) lhs; (*f)( &new_value, &old_value, rhs ); /* TODO: Should this be acquire or release? */ while ( ! KMP_COMPARE_AND_STORE_ACQ32 ( (kmp_int32 *) lhs, *(kmp_int32 *) &old_value, *(kmp_int32 *) &new_value ) ) { KMP_CPU_PAUSE(); old_value = *(kmp_int32 *) lhs; (*f)( &new_value, &old_value, rhs ); } return; } else { // // Use __kmp_atomic_lock_4i for all 4-byte data, // even if it isn't of integer data type. // #ifdef KMP_GOMP_COMPAT if ( __kmp_atomic_mode == 2 ) { __kmp_acquire_atomic_lock( & __kmp_atomic_lock, gtid ); } else #endif /* KMP_GOMP_COMPAT */ __kmp_acquire_atomic_lock( & __kmp_atomic_lock_4i, gtid ); (*f)( lhs, lhs, rhs ); #ifdef KMP_GOMP_COMPAT if ( __kmp_atomic_mode == 2 ) { __kmp_release_atomic_lock( & __kmp_atomic_lock, gtid ); } else #endif /* KMP_GOMP_COMPAT */ __kmp_release_atomic_lock( & __kmp_atomic_lock_4i, gtid ); } } void __kmpc_atomic_8( ident_t *id_ref, int gtid, void* lhs, void* rhs, void (*f)( void *, void *, void * ) ) { KMP_DEBUG_ASSERT( __kmp_init_serial ); if ( #if KMP_ARCH_X86 && defined(KMP_GOMP_COMPAT) FALSE /* must use lock */ #elif KMP_ARCH_X86 || KMP_ARCH_X86_64 TRUE /* no alignment problems */ #else ! ( (kmp_uintptr_t) lhs & 0x7) /* make sure address is 8-byte aligned */ #endif ) { kmp_int64 old_value, new_value; old_value = *(kmp_int64 *) lhs; (*f)( &new_value, &old_value, rhs ); /* TODO: Should this be acquire or release? 
        */
        while ( ! KMP_COMPARE_AND_STORE_ACQ64 ( (kmp_int64 *) lhs,
                    *(kmp_int64 *) &old_value, *(kmp_int64 *) &new_value ) )
        {
            KMP_CPU_PAUSE();

            old_value = *(kmp_int64 *) lhs;
            (*f)( &new_value, &old_value, rhs );
        }

        return;
    }
    else {
        //
        // Use __kmp_atomic_lock_8i for all 8-byte data,
        // even if it isn't of integer data type.
        //
#ifdef KMP_GOMP_COMPAT
        if ( __kmp_atomic_mode == 2 ) {
            __kmp_acquire_atomic_lock( & __kmp_atomic_lock, gtid );
        }
        else
#endif /* KMP_GOMP_COMPAT */
        __kmp_acquire_atomic_lock( & __kmp_atomic_lock_8i, gtid );

        (*f)( lhs, lhs, rhs );

#ifdef KMP_GOMP_COMPAT
        if ( __kmp_atomic_mode == 2 ) {
            __kmp_release_atomic_lock( & __kmp_atomic_lock, gtid );
        }
        else
#endif /* KMP_GOMP_COMPAT */
        __kmp_release_atomic_lock( & __kmp_atomic_lock_8i, gtid );
    }
}

void
__kmpc_atomic_10( ident_t *id_ref, int gtid, void* lhs, void* rhs,
                  void (*f)( void *, void *, void * ) )
{
    KMP_DEBUG_ASSERT( __kmp_init_serial );

#ifdef KMP_GOMP_COMPAT
    if ( __kmp_atomic_mode == 2 ) {
        __kmp_acquire_atomic_lock( & __kmp_atomic_lock, gtid );
    }
    else
#endif /* KMP_GOMP_COMPAT */
    __kmp_acquire_atomic_lock( & __kmp_atomic_lock_10r, gtid );

    (*f)( lhs, lhs, rhs );

#ifdef KMP_GOMP_COMPAT
    if ( __kmp_atomic_mode == 2 ) {
        __kmp_release_atomic_lock( & __kmp_atomic_lock, gtid );
    }
    else
#endif /* KMP_GOMP_COMPAT */
    __kmp_release_atomic_lock( & __kmp_atomic_lock_10r, gtid );
}

void
__kmpc_atomic_16( ident_t *id_ref, int gtid, void* lhs, void* rhs,
                  void (*f)( void *, void *, void * ) )
{
    KMP_DEBUG_ASSERT( __kmp_init_serial );

#ifdef KMP_GOMP_COMPAT
    if ( __kmp_atomic_mode == 2 ) {
        __kmp_acquire_atomic_lock( & __kmp_atomic_lock, gtid );
    }
    else
#endif /* KMP_GOMP_COMPAT */
    __kmp_acquire_atomic_lock( & __kmp_atomic_lock_16c, gtid );

    (*f)( lhs, lhs, rhs );

#ifdef KMP_GOMP_COMPAT
    if ( __kmp_atomic_mode == 2 ) {
        __kmp_release_atomic_lock( & __kmp_atomic_lock, gtid );
    }
    else
#endif /* KMP_GOMP_COMPAT */
    __kmp_release_atomic_lock( & __kmp_atomic_lock_16c, gtid );
}

void
__kmpc_atomic_20( ident_t *id_ref, int gtid, void* lhs, void* rhs,
                  void (*f)( void *, void *, void * ) )
{
    KMP_DEBUG_ASSERT( __kmp_init_serial );

#ifdef KMP_GOMP_COMPAT
    if ( __kmp_atomic_mode == 2 ) {
        __kmp_acquire_atomic_lock( & __kmp_atomic_lock, gtid );
    }
    else
#endif /* KMP_GOMP_COMPAT */
    __kmp_acquire_atomic_lock( & __kmp_atomic_lock_20c, gtid );

    (*f)( lhs, lhs, rhs );

#ifdef KMP_GOMP_COMPAT
    if ( __kmp_atomic_mode == 2 ) {
        __kmp_release_atomic_lock( & __kmp_atomic_lock, gtid );
    }
    else
#endif /* KMP_GOMP_COMPAT */
    __kmp_release_atomic_lock( & __kmp_atomic_lock_20c, gtid );
}

void
__kmpc_atomic_32( ident_t *id_ref, int gtid, void* lhs, void* rhs,
                  void (*f)( void *, void *, void * ) )
{
    KMP_DEBUG_ASSERT( __kmp_init_serial );

#ifdef KMP_GOMP_COMPAT
    if ( __kmp_atomic_mode == 2 ) {
        __kmp_acquire_atomic_lock( & __kmp_atomic_lock, gtid );
    }
    else
#endif /* KMP_GOMP_COMPAT */
    __kmp_acquire_atomic_lock( & __kmp_atomic_lock_32c, gtid );

    (*f)( lhs, lhs, rhs );

#ifdef KMP_GOMP_COMPAT
    if ( __kmp_atomic_mode == 2 ) {
        __kmp_release_atomic_lock( & __kmp_atomic_lock, gtid );
    }
    else
#endif /* KMP_GOMP_COMPAT */
    __kmp_release_atomic_lock( & __kmp_atomic_lock_32c, gtid );
}

// AC: same two routines as GOMP_atomic_start/end, but will be called by our compiler;
// duplicated in order to not use third-party names in pure Intel code
// TODO: consider adding GTID parameter after consultation with Ernesto/Xinmin.
void __kmpc_atomic_start(void) { int gtid = __kmp_entry_gtid(); KA_TRACE(20, ("__kmpc_atomic_start: T#%d\n", gtid)); __kmp_acquire_atomic_lock(&__kmp_atomic_lock, gtid); } void __kmpc_atomic_end(void) { int gtid = __kmp_get_gtid(); KA_TRACE(20, ("__kmpc_atomic_end: T#%d\n", gtid)); __kmp_release_atomic_lock(&__kmp_atomic_lock, gtid); } /* ------------------------------------------------------------------------ */ /* ------------------------------------------------------------------------ */ /*! @} */ // end of file
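/*
 * Illustration (not part of the runtime): every lock-free fast path in the
 * __kmpc_atomic_N routines above is the same compare-and-swap retry loop.
 * A stand-alone sketch of that pattern, using the GCC/Clang __atomic builtins
 * in place of the KMP_COMPARE_AND_STORE_ACQN macros; the names atomic_update_4
 * and add_int32 are hypothetical.
 */
#include <stdint.h>

/* example operation with the same shape as the f callback: *out = *old + *rhs */
static void add_int32( void *out, void *old, void *rhs )
{
    *(int32_t *) out = *(int32_t *) old + *(int32_t *) rhs;
}

/* lock-free read-modify-write of a 4-byte location, e.g.
   atomic_update_4( &counter, 5, add_int32 ); */
static void atomic_update_4( int32_t *lhs, int32_t rhs,
                             void (*f)( void *, void *, void * ) )
{
    int32_t old_value, new_value;

    old_value = __atomic_load_n( lhs, __ATOMIC_RELAXED );
    (*f)( &new_value, &old_value, &rhs );
    /* On failure the builtin reloads old_value from *lhs, so only the
       operation itself has to be recomputed before the next attempt. */
    while ( ! __atomic_compare_exchange_n( lhs, &old_value, new_value,
                0 /* strong */, __ATOMIC_ACQ_REL, __ATOMIC_RELAXED ) ) {
        (*f)( &new_value, &old_value, &rhs );
    }
}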
attribute.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % AAA TTTTT TTTTT RRRR IIIII BBBB U U TTTTT EEEEE % % A A T T R R I B B U U T E % % AAAAA T T RRRR I BBBB U U T EEE % % A A T T R R I B B U U T E % % A A T T R R IIIII BBBB UUU T EEEEE % % % % % % MagickCore Get / Set Image Attributes % % % % Software Design % % Cristy % % October 2002 % % % % % % Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache.h" #include "MagickCore/cache-private.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/client.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colormap.h" #include "MagickCore/colormap-private.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/composite-private.h" #include "MagickCore/constitute.h" #include "MagickCore/draw.h" #include "MagickCore/draw-private.h" #include "MagickCore/effect.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/geometry.h" #include "MagickCore/histogram.h" #include "MagickCore/identify.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/memory_.h" #include "MagickCore/magick.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/paint.h" #include "MagickCore/pixel.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/property.h" #include "MagickCore/quantize.h" #include "MagickCore/quantum-private.h" #include "MagickCore/random_.h" #include "MagickCore/resource_.h" #include "MagickCore/semaphore.h" #include "MagickCore/segment.h" #include "MagickCore/splay-tree.h" #include "MagickCore/string_.h" #include "MagickCore/thread-private.h" #include "MagickCore/threshold.h" #include "MagickCore/transform.h" #include "MagickCore/utility.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t I m a g e B o u n d i n g B o x % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageBoundingBox() returns the bounding box of an image canvas. % % The format of the GetImageBoundingBox method is: % % RectangleInfo GetImageBoundingBox(const Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o bounds: Method GetImageBoundingBox returns the bounding box of an % image canvas. % % o image: the image. 
% % o exception: return any errors or warnings in this structure. % */ MagickExport RectangleInfo GetImageBoundingBox(const Image *image, ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType status; PixelInfo target[3], zero; RectangleInfo bounds; register const Quantum *r; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); bounds.width=0; bounds.height=0; bounds.x=(ssize_t) image->columns; bounds.y=(ssize_t) image->rows; GetPixelInfo(image,&target[0]); image_view=AcquireVirtualCacheView(image,exception); r=GetCacheViewVirtualPixels(image_view,0,0,1,1,exception); if (r == (const Quantum *) NULL) { image_view=DestroyCacheView(image_view); return(bounds); } GetPixelInfoPixel(image,r,&target[0]); GetPixelInfo(image,&target[1]); r=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,0,1,1, exception); if (r != (const Quantum *) NULL) GetPixelInfoPixel(image,r,&target[1]); GetPixelInfo(image,&target[2]); r=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-1,1,1, exception); if (r != (const Quantum *) NULL) GetPixelInfoPixel(image,r,&target[2]); status=MagickTrue; GetPixelInfo(image,&zero); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { PixelInfo pixel; RectangleInfo bounding_box; register const Quantum *magick_restrict p; register ssize_t x; if (status == MagickFalse) continue; #if defined(MAGICKCORE_OPENMP_SUPPORT) # pragma omp critical (MagickCore_GetImageBoundingBox) #endif bounding_box=bounds; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; continue; } pixel=zero; for (x=0; x < (ssize_t) image->columns; x++) { GetPixelInfoPixel(image,p,&pixel); if ((x < bounding_box.x) && (IsFuzzyEquivalencePixelInfo(&pixel,&target[0]) == MagickFalse)) bounding_box.x=x; if ((x > (ssize_t) bounding_box.width) && (IsFuzzyEquivalencePixelInfo(&pixel,&target[1]) == MagickFalse)) bounding_box.width=(size_t) x; if ((y < bounding_box.y) && (IsFuzzyEquivalencePixelInfo(&pixel,&target[0]) == MagickFalse)) bounding_box.y=y; if ((y > (ssize_t) bounding_box.height) && (IsFuzzyEquivalencePixelInfo(&pixel,&target[2]) == MagickFalse)) bounding_box.height=(size_t) y; p+=GetPixelChannels(image); } #if defined(MAGICKCORE_OPENMP_SUPPORT) # pragma omp critical (MagickCore_GetImageBoundingBox) #endif { if (bounding_box.x < bounds.x) bounds.x=bounding_box.x; if (bounding_box.y < bounds.y) bounds.y=bounding_box.y; if (bounding_box.width > bounds.width) bounds.width=bounding_box.width; if (bounding_box.height > bounds.height) bounds.height=bounding_box.height; } } image_view=DestroyCacheView(image_view); if ((bounds.width == 0) && (bounds.height == 0)) (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning, "GeometryDoesNotContainImage","`%s'",image->filename); else { bounds.width-=(bounds.x-1); bounds.height-=(bounds.y-1); } return(bounds); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e D e p t h % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageDepth() returns the depth of a particular image channel. 
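%  That is, it computes the smallest bit depth at which every channel sample
%  in the image survives a scale-down/scale-up round trip unchanged.  A sketch
%  of typical usage:
%
%      depth=GetImageDepth(image,exception);
%      (void) SetImageDepth(image,depth,exception);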
% % The format of the GetImageDepth method is: % % size_t GetImageDepth(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport size_t GetImageDepth(const Image *image,ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType status; register ssize_t i; size_t *current_depth, depth, number_threads; ssize_t y; /* Compute image depth. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); number_threads=(size_t) GetMagickResourceLimit(ThreadResource); current_depth=(size_t *) AcquireQuantumMemory(number_threads, sizeof(*current_depth)); if (current_depth == (size_t *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); status=MagickTrue; for (i=0; i < (ssize_t) number_threads; i++) current_depth[i]=1; if ((image->storage_class == PseudoClass) && (image->alpha_trait == UndefinedPixelTrait)) { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ if ((image->colors) > 256) \ num_threads(GetMagickResourceLimit(ThreadResource)) #endif for (i=0; i < (ssize_t) image->colors; i++) { const int id = GetOpenMPThreadId(); while (current_depth[id] < MAGICKCORE_QUANTUM_DEPTH) { MagickBooleanType atDepth; QuantumAny range; atDepth=MagickTrue; range=GetQuantumRange(current_depth[id]); if ((atDepth != MagickFalse) && (GetPixelRedTraits(image) & UpdatePixelTrait) != 0) if (IsPixelAtDepth(image->colormap[i].red,range) == MagickFalse) atDepth=MagickFalse; if ((atDepth != MagickFalse) && (GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) if (IsPixelAtDepth(image->colormap[i].green,range) == MagickFalse) atDepth=MagickFalse; if ((atDepth != MagickFalse) && (GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) if (IsPixelAtDepth(image->colormap[i].blue,range) == MagickFalse) atDepth=MagickFalse; if ((atDepth != MagickFalse)) break; current_depth[id]++; } } depth=current_depth[0]; for (i=1; i < (ssize_t) number_threads; i++) if (depth < current_depth[i]) depth=current_depth[i]; current_depth=(size_t *) RelinquishMagickMemory(current_depth); return(depth); } image_view=AcquireVirtualCacheView(image,exception); #if !defined(MAGICKCORE_HDRI_SUPPORT) if (QuantumRange <= MaxMap) { size_t *depth_map; /* Scale pixels to desired (optimized with depth map). 
*/ depth_map=(size_t *) AcquireQuantumMemory(MaxMap+1,sizeof(*depth_map)); if (depth_map == (size_t *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); for (i=0; i <= (ssize_t) MaxMap; i++) { unsigned int depth; for (depth=1; depth < MAGICKCORE_QUANTUM_DEPTH; depth++) { Quantum pixel; QuantumAny range; range=GetQuantumRange(depth); pixel=(Quantum) i; if (pixel == ScaleAnyToQuantum(ScaleQuantumToAny(pixel,range),range)) break; } depth_map[i]=depth; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); register const Quantum *magick_restrict p; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) continue; for (x=0; x < (ssize_t) image->columns; x++) { if (GetPixelWriteMask(image,p) == 0) { p+=GetPixelChannels(image); continue; } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel=GetPixelChannelChannel(image,i); PixelTrait traits=GetPixelChannelTraits(image,channel); if ((traits == UndefinedPixelTrait) || (channel == IndexPixelChannel) || (channel == ReadMaskPixelChannel) || (channel == MetaPixelChannel)) continue; if (depth_map[ScaleQuantumToMap(p[i])] > current_depth[id]) current_depth[id]=depth_map[ScaleQuantumToMap(p[i])]; } p+=GetPixelChannels(image); } if (current_depth[id] == MAGICKCORE_QUANTUM_DEPTH) status=MagickFalse; } image_view=DestroyCacheView(image_view); depth=current_depth[0]; for (i=1; i < (ssize_t) number_threads; i++) if (depth < current_depth[i]) depth=current_depth[i]; depth_map=(size_t *) RelinquishMagickMemory(depth_map); current_depth=(size_t *) RelinquishMagickMemory(current_depth); return(depth); } #endif /* Compute pixel depth. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); register const Quantum *magick_restrict p; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) continue; for (x=0; x < (ssize_t) image->columns; x++) { if (GetPixelWriteMask(image,p) == 0) { p+=GetPixelChannels(image); continue; } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel; PixelTrait traits; channel=GetPixelChannelChannel(image,i); traits=GetPixelChannelTraits(image,channel); if ((traits == UndefinedPixelTrait) || (channel == IndexPixelChannel) || (channel == ReadMaskPixelChannel)) continue; while (current_depth[id] < MAGICKCORE_QUANTUM_DEPTH) { QuantumAny range; range=GetQuantumRange(current_depth[id]); if (p[i] == ScaleAnyToQuantum(ScaleQuantumToAny(p[i],range),range)) break; current_depth[id]++; } } p+=GetPixelChannels(image); } if (current_depth[id] == MAGICKCORE_QUANTUM_DEPTH) status=MagickFalse; } image_view=DestroyCacheView(image_view); depth=current_depth[0]; for (i=1; i < (ssize_t) number_threads; i++) if (depth < current_depth[i]) depth=current_depth[i]; current_depth=(size_t *) RelinquishMagickMemory(current_depth); return(depth); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e Q u a n t u m D e p t h % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageQuantumDepth() returns the depth of the image rounded to a legal % quantum depth: 8, 16, or 32. % % The format of the GetImageQuantumDepth method is: % % size_t GetImageQuantumDepth(const Image *image, % const MagickBooleanType constrain) % % A description of each parameter follows: % % o image: the image. % % o constrain: A value other than MagickFalse, constrains the depth to % a maximum of MAGICKCORE_QUANTUM_DEPTH. % */ MagickExport size_t GetImageQuantumDepth(const Image *image, const MagickBooleanType constrain) { size_t depth; depth=image->depth; if (depth <= 8) depth=8; else if (depth <= 16) depth=16; else if (depth <= 32) depth=32; else if (depth <= 64) depth=64; if (constrain != MagickFalse) depth=(size_t) MagickMin((double) depth,(double) MAGICKCORE_QUANTUM_DEPTH); return(depth); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e T y p e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageType() returns the type of image: % % Bilevel Grayscale GrayscaleMatte % Palette PaletteMatte TrueColor % TrueColorMatte ColorSeparation ColorSeparationMatte % % The format of the GetImageType method is: % % ImageType GetImageType(const Image *image) % % A description of each parameter follows: % % o image: the image. 
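%
%  To coerce the image into the type reported here, pass the result to
%  SetImageType(), for example:
%
%    (void) SetImageType(image,GetImageType(image),exception);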
%
*/
MagickExport ImageType GetImageType(const Image *image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->colorspace == CMYKColorspace)
    {
      if (image->alpha_trait == UndefinedPixelTrait)
        return(ColorSeparationType);
      return(ColorSeparationAlphaType);
    }
  if (IsImageMonochrome(image) != MagickFalse)
    return(BilevelType);
  if (IsImageGray(image) != MagickFalse)
    {
      if (image->alpha_trait != UndefinedPixelTrait)
        return(GrayscaleAlphaType);
      return(GrayscaleType);
    }
  if (IsPaletteImage(image) != MagickFalse)
    {
      if (image->alpha_trait != UndefinedPixelTrait)
        return(PaletteAlphaType);
      return(PaletteType);
    }
  if (image->alpha_trait != UndefinedPixelTrait)
    return(TrueColorAlphaType);
  return(TrueColorType);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I d e n t i f y I m a g e G r a y                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IdentifyImageGray() returns grayscale if all the pixels in the image have
%  the same red, green, and blue intensities, and bi-level if the intensity is
%  either 0 or QuantumRange.  Otherwise undefined is returned.
%
%  The format of the IdentifyImageGray method is:
%
%      ImageType IdentifyImageGray(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport ImageType IdentifyImageGray(const Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  ImageType
    type;

  register const Quantum
    *p;

  register ssize_t
    x;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if ((image->type == BilevelType) || (image->type == GrayscaleType) ||
      (image->type == GrayscaleAlphaType))
    return(image->type);
  if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
    return(UndefinedType);
  type=BilevelType;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (IsPixelGray(image,p) == MagickFalse)
        {
          type=UndefinedType;
          break;
        }
      if ((type == BilevelType) &&
          (IsPixelMonochrome(image,p) == MagickFalse))
        type=GrayscaleType;
      p+=GetPixelChannels(image);
    }
    if (type == UndefinedType)
      break;
  }
  image_view=DestroyCacheView(image_view);
  if ((type == GrayscaleType) && (image->alpha_trait != UndefinedPixelTrait))
    type=GrayscaleAlphaType;
  return(type);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I d e n t i f y I m a g e M o n o c h r o m e                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IdentifyImageMonochrome() returns MagickTrue if all the pixels in the image
%  have the same red, green, and blue intensities and the intensity is either
%  0 or QuantumRange.
%
%  The format of the IdentifyImageMonochrome method is:
%
%      MagickBooleanType IdentifyImageMonochrome(const Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
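%
%  For example (a sketch):
%
%    if (IdentifyImageMonochrome(image,exception) != MagickFalse)
%      (void) SetImageType(image,BilevelType,exception);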
% */ MagickExport MagickBooleanType IdentifyImageMonochrome(const Image *image, ExceptionInfo *exception) { CacheView *image_view; ImageType type; register ssize_t x; register const Quantum *p; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->type == BilevelType) return(MagickTrue); if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse) return(MagickFalse); type=BilevelType; image_view=AcquireVirtualCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { if (IsPixelMonochrome(image,p) == MagickFalse) { type=UndefinedType; break; } p+=GetPixelChannels(image); } if (type == UndefinedType) break; } image_view=DestroyCacheView(image_view); if (type == BilevelType) return(MagickTrue); return(MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I d e n t i f y I m a g e T y p e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IdentifyImageType() returns the potential type of image: % % Bilevel Grayscale GrayscaleMatte % Palette PaletteMatte TrueColor % TrueColorMatte ColorSeparation ColorSeparationMatte % % To ensure the image type matches its potential, use SetImageType(): % % (void) SetImageType(image,IdentifyImageType(image,exception),exception); % % The format of the IdentifyImageType method is: % % ImageType IdentifyImageType(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport ImageType IdentifyImageType(const Image *image, ExceptionInfo *exception) { assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->colorspace == CMYKColorspace) { if (image->alpha_trait == UndefinedPixelTrait) return(ColorSeparationType); return(ColorSeparationAlphaType); } if (IdentifyImageMonochrome(image,exception) != MagickFalse) return(BilevelType); if (IdentifyImageGray(image,exception) != UndefinedType) { if (image->alpha_trait != UndefinedPixelTrait) return(GrayscaleAlphaType); return(GrayscaleType); } if (IdentifyPaletteImage(image,exception) != MagickFalse) { if (image->alpha_trait != UndefinedPixelTrait) return(PaletteAlphaType); return(PaletteType); } if (image->alpha_trait != UndefinedPixelTrait) return(TrueColorAlphaType); return(TrueColorType); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s I m a g e G r a y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsImageGray() returns MagickTrue if the type of the image is grayscale or % bi-level. % % The format of the IsImageGray method is: % % MagickBooleanType IsImageGray(const Image *image) % % A description of each parameter follows: % % o image: the image. 
%
*/
MagickExport MagickBooleanType IsImageGray(const Image *image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if ((image->type == BilevelType) || (image->type == GrayscaleType) ||
      (image->type == GrayscaleAlphaType))
    return(MagickTrue);
  return(MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I s I m a g e M o n o c h r o m e                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsImageMonochrome() returns MagickTrue if the type of the image is bi-level.
%
%  The format of the IsImageMonochrome method is:
%
%      MagickBooleanType IsImageMonochrome(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport MagickBooleanType IsImageMonochrome(const Image *image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->type == BilevelType)
    return(MagickTrue);
  return(MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I s I m a g e O p a q u e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsImageOpaque() returns MagickTrue if none of the pixels in the image have
%  an alpha value other than OpaqueAlpha (QuantumRange).
%
%  It returns MagickTrue immediately if no alpha channel is available.
%
%  The format of the IsImageOpaque method is:
%
%      MagickBooleanType IsImageOpaque(const Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IsImageOpaque(const Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  register const Quantum
    *p;

  register ssize_t
    x;

  ssize_t
    y;

  /*
    Determine if image is opaque.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->alpha_trait == UndefinedPixelTrait)
    return(MagickTrue);
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (GetPixelAlpha(image,p) != OpaqueAlpha)
        break;
      p+=GetPixelChannels(image);
    }
    if (x < (ssize_t) image->columns)
      break;
  }
  image_view=DestroyCacheView(image_view);
  return(y < (ssize_t) image->rows ? MagickFalse : MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e D e p t h                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageDepth() sets the depth of the image.
%
%  The format of the SetImageDepth method is:
%
%      MagickBooleanType SetImageDepth(Image *image,const size_t depth,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o depth: the image depth.
%
%    o exception: return any errors or warnings in this structure.
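%
%  For example, to reduce the image to 8 significant bits per channel sample:
%
%    (void) SetImageDepth(image,8,exception);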
% */ MagickExport MagickBooleanType SetImageDepth(Image *image, const size_t depth,ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType status; QuantumAny range; ssize_t y; assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickCoreSignature); if (depth >= MAGICKCORE_QUANTUM_DEPTH) { image->depth=depth; return(MagickTrue); } range=GetQuantumRange(depth); if (image->storage_class == PseudoClass) { register ssize_t i; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,1,1) #endif for (i=0; i < (ssize_t) image->colors; i++) { if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].red=(double) ScaleAnyToQuantum(ScaleQuantumToAny( ClampPixel(image->colormap[i].red),range),range); if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].green=(double) ScaleAnyToQuantum(ScaleQuantumToAny( ClampPixel(image->colormap[i].green),range),range); if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].blue=(double) ScaleAnyToQuantum(ScaleQuantumToAny( ClampPixel(image->colormap[i].blue),range),range); if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].alpha=(double) ScaleAnyToQuantum(ScaleQuantumToAny( ClampPixel(image->colormap[i].alpha),range),range); } } status=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); #if !defined(MAGICKCORE_HDRI_SUPPORT) if (QuantumRange <= MaxMap) { Quantum *depth_map; register ssize_t i; /* Scale pixels to desired (optimized with depth map). */ depth_map=(Quantum *) AcquireQuantumMemory(MaxMap+1,sizeof(*depth_map)); if (depth_map == (Quantum *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); for (i=0; i <= (ssize_t) MaxMap; i++) depth_map[i]=ScaleAnyToQuantum(ScaleQuantumToAny((Quantum) i,range), range); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; if (GetPixelWriteMask(image,q) == 0) { q+=GetPixelChannels(image); continue; } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel; PixelTrait traits; channel=GetPixelChannelChannel(image,i); traits=GetPixelChannelTraits(image,channel); if ((traits == UndefinedPixelTrait) || (channel == IndexPixelChannel) || (channel == ReadMaskPixelChannel)) continue; q[i]=depth_map[ScaleQuantumToMap(q[i])]; } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) { status=MagickFalse; continue; } } image_view=DestroyCacheView(image_view); depth_map=(Quantum *) RelinquishMagickMemory(depth_map); if (status != MagickFalse) image->depth=depth; return(status); } #endif /* Scale pixels to desired depth. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; if (GetPixelWriteMask(image,q) == 0) { q+=GetPixelChannels(image); continue; } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel; PixelTrait traits; channel=GetPixelChannelChannel(image,i); traits=GetPixelChannelTraits(image,channel); if ((traits == UndefinedPixelTrait) || (channel == IndexPixelChannel) || (channel == ReadMaskPixelChannel)) continue; q[i]=ScaleAnyToQuantum(ScaleQuantumToAny(ClampPixel(q[i]),range),range); } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) { status=MagickFalse; continue; } } image_view=DestroyCacheView(image_view); if (status != MagickFalse) image->depth=depth; return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e T y p e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageType() sets the type of image. Choose from these types: % % Bilevel Grayscale GrayscaleMatte % Palette PaletteMatte TrueColor % TrueColorMatte ColorSeparation ColorSeparationMatte % OptimizeType % % The format of the SetImageType method is: % % MagickBooleanType SetImageType(Image *image,const ImageType type, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o type: Image type. % % o exception: return any errors or warnings in this structure. 
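%
%  For example, to force a grayscale representation without an alpha channel:
%
%    (void) SetImageType(image,GrayscaleType,exception);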
% */ MagickExport MagickBooleanType SetImageType(Image *image,const ImageType type, ExceptionInfo *exception) { const char *artifact; ImageInfo *image_info; MagickBooleanType status; QuantizeInfo *quantize_info; assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickCoreSignature); status=MagickTrue; image_info=AcquireImageInfo(); image_info->dither=image->dither; artifact=GetImageArtifact(image,"dither"); if (artifact != (const char *) NULL) (void) SetImageOption(image_info,"dither",artifact); switch (type) { case BilevelType: { if (SetImageMonochrome(image,exception) == MagickFalse) { status=TransformImageColorspace(image,GRAYColorspace,exception); (void) NormalizeImage(image,exception); quantize_info=AcquireQuantizeInfo(image_info); quantize_info->number_colors=2; quantize_info->colorspace=GRAYColorspace; status=QuantizeImage(quantize_info,image,exception); quantize_info=DestroyQuantizeInfo(quantize_info); } image->alpha_trait=UndefinedPixelTrait; break; } case GrayscaleType: { if (SetImageGray(image,exception) == MagickFalse) status=TransformImageColorspace(image,GRAYColorspace,exception); image->alpha_trait=UndefinedPixelTrait; break; } case GrayscaleAlphaType: { if (SetImageGray(image,exception) == MagickFalse) status=TransformImageColorspace(image,GRAYColorspace,exception); if (image->alpha_trait == UndefinedPixelTrait) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception); break; } case PaletteType: { if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse) status=TransformImageColorspace(image,sRGBColorspace,exception); if ((image->storage_class == DirectClass) || (image->colors > 256)) { quantize_info=AcquireQuantizeInfo(image_info); quantize_info->number_colors=256; status=QuantizeImage(quantize_info,image,exception); quantize_info=DestroyQuantizeInfo(quantize_info); } image->alpha_trait=UndefinedPixelTrait; break; } case PaletteBilevelAlphaType: { ChannelType channel_mask; if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse) status=TransformImageColorspace(image,sRGBColorspace,exception); if (image->alpha_trait == UndefinedPixelTrait) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception); channel_mask=SetImageChannelMask(image,AlphaChannel); (void) BilevelImage(image,(double) QuantumRange/2.0,exception); (void) SetImageChannelMask(image,channel_mask); quantize_info=AcquireQuantizeInfo(image_info); status=QuantizeImage(quantize_info,image,exception); quantize_info=DestroyQuantizeInfo(quantize_info); break; } case PaletteAlphaType: { if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse) status=TransformImageColorspace(image,sRGBColorspace,exception); if (image->alpha_trait == UndefinedPixelTrait) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception); quantize_info=AcquireQuantizeInfo(image_info); quantize_info->colorspace=TransparentColorspace; status=QuantizeImage(quantize_info,image,exception); quantize_info=DestroyQuantizeInfo(quantize_info); break; } case TrueColorType: { if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse) status=TransformImageColorspace(image,sRGBColorspace,exception); if (image->storage_class != DirectClass) status=SetImageStorageClass(image,DirectClass,exception); image->alpha_trait=UndefinedPixelTrait; break; } case TrueColorAlphaType: { if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse) status=TransformImageColorspace(image,sRGBColorspace,exception); if 
(image->storage_class != DirectClass) status=SetImageStorageClass(image,DirectClass,exception); if (image->alpha_trait == UndefinedPixelTrait) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception); break; } case ColorSeparationType: { if (image->colorspace != CMYKColorspace) { if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse) status=TransformImageColorspace(image,sRGBColorspace,exception); status=TransformImageColorspace(image,CMYKColorspace,exception); } if (image->storage_class != DirectClass) status=SetImageStorageClass(image,DirectClass,exception); image->alpha_trait=UndefinedPixelTrait; break; } case ColorSeparationAlphaType: { if (image->colorspace != CMYKColorspace) { if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse) status=TransformImageColorspace(image,sRGBColorspace,exception); status=TransformImageColorspace(image,CMYKColorspace,exception); } if (image->storage_class != DirectClass) status=SetImageStorageClass(image,DirectClass,exception); if (image->alpha_trait == UndefinedPixelTrait) status=SetImageAlphaChannel(image,OpaqueAlphaChannel,exception); break; } case OptimizeType: case UndefinedType: break; } image_info=DestroyImageInfo(image_info); if (status == MagickFalse) return(status); image->type=type; return(MagickTrue); }
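/*
  Illustration (not part of MagickCore): the depth logic in GetImageDepth()
  and SetImageDepth() above reduces to one round-trip test -- a sample is
  representable at depth d exactly when scaling it down to d bits and back up
  is the identity.  A stand-alone sketch with 16-bit samples; all names here
  (scale_16_to_range, scale_range_to_16, sample) are hypothetical.
*/
#include <stdio.h>
#include <stdint.h>

static uint32_t scale_16_to_range(uint16_t q,uint32_t range)
{
  /* map [0,65535] onto [0,range], rounding to nearest */
  return((uint32_t) (((double) q*range/65535.0)+0.5));
}

static uint16_t scale_range_to_16(uint32_t v,uint32_t range)
{
  /* map [0,range] back onto [0,65535], rounding to nearest */
  return((uint16_t) (((double) v*65535.0/range)+0.5));
}

int main(void)
{
  uint16_t
    sample = 0x1111;  /* bit pattern repeats every 4 bits, so depth 4 suffices */

  unsigned
    depth;

  for (depth=1; depth < 16; depth++)
  {
    uint32_t
      range = (1u << depth)-1;

    if (scale_range_to_16(scale_16_to_range(sample,range),range) == sample)
      break;  /* smallest depth whose round trip reproduces the sample */
  }
  (void) printf("minimal depth of 0x%04x: %u\n",sample,depth);  /* prints 4 */
  return(0);
}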
chacha.c
/* Generated by Cython 0.28.5 */ /* BEGIN: Cython Metadata { "distutils": { "depends": [], "extra_compile_args": [ "-fopenmp", "-O3" ], "extra_link_args": [ "-fopenmp", "-O3" ], "name": "nescient.crypto.chacha", "sources": [ "nescient/crypto/chacha.pyx" ] }, "module_name": "nescient.crypto.chacha" } END: Cython Metadata */ #define PY_SSIZE_T_CLEAN #include "Python.h" #ifndef Py_PYTHON_H #error Python headers needed to compile C extensions, please install development version of Python. #elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000) #error Cython requires Python 2.6+ or Python 3.3+. #else #define CYTHON_ABI "0_28_5" #define CYTHON_FUTURE_DIVISION 0 #include <stddef.h> #ifndef offsetof #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) #endif #if !defined(WIN32) && !defined(MS_WINDOWS) #ifndef __stdcall #define __stdcall #endif #ifndef __cdecl #define __cdecl #endif #ifndef __fastcall #define __fastcall #endif #endif #ifndef DL_IMPORT #define DL_IMPORT(t) t #endif #ifndef DL_EXPORT #define DL_EXPORT(t) t #endif #define __PYX_COMMA , #ifndef HAVE_LONG_LONG #if PY_VERSION_HEX >= 0x02070000 #define HAVE_LONG_LONG #endif #endif #ifndef PY_LONG_LONG #define PY_LONG_LONG LONG_LONG #endif #ifndef Py_HUGE_VAL #define Py_HUGE_VAL HUGE_VAL #endif #ifdef PYPY_VERSION #define CYTHON_COMPILING_IN_PYPY 1 #define CYTHON_COMPILING_IN_PYSTON 0 #define CYTHON_COMPILING_IN_CPYTHON 0 #undef CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 0 #undef CYTHON_USE_PYTYPE_LOOKUP #define CYTHON_USE_PYTYPE_LOOKUP 0 #if PY_VERSION_HEX < 0x03050000 #undef CYTHON_USE_ASYNC_SLOTS #define CYTHON_USE_ASYNC_SLOTS 0 #elif !defined(CYTHON_USE_ASYNC_SLOTS) #define CYTHON_USE_ASYNC_SLOTS 1 #endif #undef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 0 #undef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 0 #undef CYTHON_USE_UNICODE_WRITER #define CYTHON_USE_UNICODE_WRITER 0 #undef CYTHON_USE_PYLONG_INTERNALS #define CYTHON_USE_PYLONG_INTERNALS 0 #undef CYTHON_AVOID_BORROWED_REFS #define CYTHON_AVOID_BORROWED_REFS 1 #undef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 0 #undef CYTHON_UNPACK_METHODS #define CYTHON_UNPACK_METHODS 0 #undef CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 0 #undef CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL 0 #undef CYTHON_PEP489_MULTI_PHASE_INIT #define CYTHON_PEP489_MULTI_PHASE_INIT 0 #undef CYTHON_USE_TP_FINALIZE #define CYTHON_USE_TP_FINALIZE 0 #elif defined(PYSTON_VERSION) #define CYTHON_COMPILING_IN_PYPY 0 #define CYTHON_COMPILING_IN_PYSTON 1 #define CYTHON_COMPILING_IN_CPYTHON 0 #ifndef CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 1 #endif #undef CYTHON_USE_PYTYPE_LOOKUP #define CYTHON_USE_PYTYPE_LOOKUP 0 #undef CYTHON_USE_ASYNC_SLOTS #define CYTHON_USE_ASYNC_SLOTS 0 #undef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 0 #ifndef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 1 #endif #undef CYTHON_USE_UNICODE_WRITER #define CYTHON_USE_UNICODE_WRITER 0 #undef CYTHON_USE_PYLONG_INTERNALS #define CYTHON_USE_PYLONG_INTERNALS 0 #ifndef CYTHON_AVOID_BORROWED_REFS #define CYTHON_AVOID_BORROWED_REFS 0 #endif #ifndef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 1 #endif #ifndef CYTHON_UNPACK_METHODS #define CYTHON_UNPACK_METHODS 1 #endif #undef CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 0 #undef CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL 0 #undef CYTHON_PEP489_MULTI_PHASE_INIT #define 
CYTHON_PEP489_MULTI_PHASE_INIT 0 #undef CYTHON_USE_TP_FINALIZE #define CYTHON_USE_TP_FINALIZE 0 #else #define CYTHON_COMPILING_IN_PYPY 0 #define CYTHON_COMPILING_IN_PYSTON 0 #define CYTHON_COMPILING_IN_CPYTHON 1 #ifndef CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 1 #endif #if PY_VERSION_HEX < 0x02070000 #undef CYTHON_USE_PYTYPE_LOOKUP #define CYTHON_USE_PYTYPE_LOOKUP 0 #elif !defined(CYTHON_USE_PYTYPE_LOOKUP) #define CYTHON_USE_PYTYPE_LOOKUP 1 #endif #if PY_MAJOR_VERSION < 3 #undef CYTHON_USE_ASYNC_SLOTS #define CYTHON_USE_ASYNC_SLOTS 0 #elif !defined(CYTHON_USE_ASYNC_SLOTS) #define CYTHON_USE_ASYNC_SLOTS 1 #endif #if PY_VERSION_HEX < 0x02070000 #undef CYTHON_USE_PYLONG_INTERNALS #define CYTHON_USE_PYLONG_INTERNALS 0 #elif !defined(CYTHON_USE_PYLONG_INTERNALS) #define CYTHON_USE_PYLONG_INTERNALS 1 #endif #ifndef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 1 #endif #ifndef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 1 #endif #if PY_VERSION_HEX < 0x030300F0 #undef CYTHON_USE_UNICODE_WRITER #define CYTHON_USE_UNICODE_WRITER 0 #elif !defined(CYTHON_USE_UNICODE_WRITER) #define CYTHON_USE_UNICODE_WRITER 1 #endif #ifndef CYTHON_AVOID_BORROWED_REFS #define CYTHON_AVOID_BORROWED_REFS 0 #endif #ifndef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 1 #endif #ifndef CYTHON_UNPACK_METHODS #define CYTHON_UNPACK_METHODS 1 #endif #ifndef CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 1 #endif #ifndef CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL 1 #endif #ifndef CYTHON_PEP489_MULTI_PHASE_INIT #define CYTHON_PEP489_MULTI_PHASE_INIT (0 && PY_VERSION_HEX >= 0x03050000) #endif #ifndef CYTHON_USE_TP_FINALIZE #define CYTHON_USE_TP_FINALIZE (PY_VERSION_HEX >= 0x030400a1) #endif #endif #if !defined(CYTHON_FAST_PYCCALL) #define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1) #endif #if CYTHON_USE_PYLONG_INTERNALS #include "longintrepr.h" #undef SHIFT #undef BASE #undef MASK #endif #ifndef __has_attribute #define __has_attribute(x) 0 #endif #ifndef __has_cpp_attribute #define __has_cpp_attribute(x) 0 #endif #ifndef CYTHON_RESTRICT #if defined(__GNUC__) #define CYTHON_RESTRICT __restrict__ #elif defined(_MSC_VER) && _MSC_VER >= 1400 #define CYTHON_RESTRICT __restrict #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_RESTRICT restrict #else #define CYTHON_RESTRICT #endif #endif #ifndef CYTHON_UNUSED # if defined(__GNUC__) # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif # elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif #endif #ifndef CYTHON_MAYBE_UNUSED_VAR # if defined(__cplusplus) template<class T> void CYTHON_MAYBE_UNUSED_VAR( const T& ) { } # else # define CYTHON_MAYBE_UNUSED_VAR(x) (void)(x) # endif #endif #ifndef CYTHON_NCP_UNUSED # if CYTHON_COMPILING_IN_CPYTHON # define CYTHON_NCP_UNUSED # else # define CYTHON_NCP_UNUSED CYTHON_UNUSED # endif #endif #define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None) #ifdef _MSC_VER #ifndef _MSC_STDINT_H_ #if _MSC_VER < 1300 typedef unsigned char uint8_t; typedef unsigned int uint32_t; #else typedef unsigned __int8 uint8_t; typedef unsigned __int32 uint32_t; #endif #endif #else #include <stdint.h> #endif #ifndef CYTHON_FALLTHROUGH #if defined(__cplusplus) && __cplusplus 
>= 201103L #if __has_cpp_attribute(fallthrough) #define CYTHON_FALLTHROUGH [[fallthrough]] #elif __has_cpp_attribute(clang::fallthrough) #define CYTHON_FALLTHROUGH [[clang::fallthrough]] #elif __has_cpp_attribute(gnu::fallthrough) #define CYTHON_FALLTHROUGH [[gnu::fallthrough]] #endif #endif #ifndef CYTHON_FALLTHROUGH #if __has_attribute(fallthrough) #define CYTHON_FALLTHROUGH __attribute__((fallthrough)) #else #define CYTHON_FALLTHROUGH #endif #endif #if defined(__clang__ ) && defined(__apple_build_version__) #if __apple_build_version__ < 7000000 #undef CYTHON_FALLTHROUGH #define CYTHON_FALLTHROUGH #endif #endif #endif #ifndef CYTHON_INLINE #if defined(__clang__) #define CYTHON_INLINE __inline__ __attribute__ ((__unused__)) #elif defined(__GNUC__) #define CYTHON_INLINE __inline__ #elif defined(_MSC_VER) #define CYTHON_INLINE __inline #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_INLINE inline #else #define CYTHON_INLINE #endif #endif #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag) #define Py_OptimizeFlag 0 #endif #define __PYX_BUILD_PY_SSIZE_T "n" #define CYTHON_FORMAT_SSIZE_T "z" #if PY_MAJOR_VERSION < 3 #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #define __Pyx_DefaultClassType PyClass_Type #else #define __Pyx_BUILTIN_MODULE_NAME "builtins" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #define __Pyx_DefaultClassType PyType_Type #endif #ifndef Py_TPFLAGS_CHECKTYPES #define Py_TPFLAGS_CHECKTYPES 0 #endif #ifndef Py_TPFLAGS_HAVE_INDEX #define Py_TPFLAGS_HAVE_INDEX 0 #endif #ifndef Py_TPFLAGS_HAVE_NEWBUFFER #define Py_TPFLAGS_HAVE_NEWBUFFER 0 #endif #ifndef Py_TPFLAGS_HAVE_FINALIZE #define Py_TPFLAGS_HAVE_FINALIZE 0 #endif #if PY_VERSION_HEX <= 0x030700A3 || !defined(METH_FASTCALL) #ifndef METH_FASTCALL #define METH_FASTCALL 0x80 #endif typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject *const *args, Py_ssize_t nargs); typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames); #else #define __Pyx_PyCFunctionFast _PyCFunctionFast #define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords #endif #if CYTHON_FAST_PYCCALL #define __Pyx_PyFastCFunction_Check(func)\ ((PyCFunction_Check(func) && (METH_FASTCALL == (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS))))) #else #define __Pyx_PyFastCFunction_Check(func) 0 #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc) #define PyObject_Malloc(s) PyMem_Malloc(s) #define PyObject_Free(p) PyMem_Free(p) #define PyObject_Realloc(p) PyMem_Realloc(p) #endif #if CYTHON_COMPILING_IN_PYSTON #define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co) #define __Pyx_PyFrame_SetLineNumber(frame, lineno) PyFrame_SetLineNumber(frame, lineno) #else #define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0) #define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno) #endif #if !CYTHON_FAST_THREAD_STATE || PY_VERSION_HEX < 0x02070000 #define __Pyx_PyThreadState_Current PyThreadState_GET() #elif PY_VERSION_HEX >= 0x03060000 #define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet() #elif PY_VERSION_HEX >= 0x03000000 #define 
__Pyx_PyThreadState_Current PyThreadState_GET() #else #define __Pyx_PyThreadState_Current _PyThreadState_Current #endif #if PY_VERSION_HEX < 0x030700A2 && !defined(PyThread_tss_create) && !defined(Py_tss_NEEDS_INIT) #include "pythread.h" #define Py_tss_NEEDS_INIT 0 typedef int Py_tss_t; static CYTHON_INLINE int PyThread_tss_create(Py_tss_t *key) { *key = PyThread_create_key(); return 0; // PyThread_create_key reports success always } static CYTHON_INLINE Py_tss_t * PyThread_tss_alloc(void) { Py_tss_t *key = (Py_tss_t *)PyObject_Malloc(sizeof(Py_tss_t)); *key = Py_tss_NEEDS_INIT; return key; } static CYTHON_INLINE void PyThread_tss_free(Py_tss_t *key) { PyObject_Free(key); } static CYTHON_INLINE int PyThread_tss_is_created(Py_tss_t *key) { return *key != Py_tss_NEEDS_INIT; } static CYTHON_INLINE void PyThread_tss_delete(Py_tss_t *key) { PyThread_delete_key(*key); *key = Py_tss_NEEDS_INIT; } static CYTHON_INLINE int PyThread_tss_set(Py_tss_t *key, void *value) { return PyThread_set_key_value(*key, value); } static CYTHON_INLINE void * PyThread_tss_get(Py_tss_t *key) { return PyThread_get_key_value(*key); } #endif // TSS (Thread Specific Storage) API #if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized) #define __Pyx_PyDict_NewPresized(n) ((n <= 8) ? PyDict_New() : _PyDict_NewPresized(n)) #else #define __Pyx_PyDict_NewPresized(n) PyDict_New() #endif #if PY_MAJOR_VERSION >= 3 || CYTHON_FUTURE_DIVISION #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) #else #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) #endif #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 && CYTHON_USE_UNICODE_INTERNALS #define __Pyx_PyDict_GetItemStr(dict, name) _PyDict_GetItem_KnownHash(dict, name, ((PyASCIIObject *) name)->hash) #else #define __Pyx_PyDict_GetItemStr(dict, name) PyDict_GetItem(dict, name) #endif #if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND) #define CYTHON_PEP393_ENABLED 1 #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\ 0 : _PyUnicode_Ready((PyObject *)(op))) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i) #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u) #define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u) #define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u) #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i) #define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, ch) #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u))) #else #define CYTHON_PEP393_ENABLED 0 #define PyUnicode_1BYTE_KIND 1 #define PyUnicode_2BYTE_KIND 2 #define PyUnicode_4BYTE_KIND 4 #define __Pyx_PyUnicode_READY(op) (0) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 
65535 : 1114111) #define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE)) #define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u)) #define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i])) #define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = ch) #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u)) #endif #if CYTHON_COMPILING_IN_PYPY #define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b) #else #define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\ PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b)) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains) #define PyUnicode_Contains(u, s) PySequence_Contains(u, s) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyByteArray_Check) #define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format) #define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt) #endif #define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b)) #define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? PyNumber_Remainder(a, b) : PyUnicode_Format(a, b)) #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b) #else #define __Pyx_PyString_Format(a, b) PyString_Format(a, b) #endif #if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII) #define PyObject_ASCII(o) PyObject_Repr(o) #endif #if PY_MAJOR_VERSION >= 3 #define PyBaseString_Type PyUnicode_Type #define PyStringObject PyUnicodeObject #define PyString_Type PyUnicode_Type #define PyString_Check PyUnicode_Check #define PyString_CheckExact PyUnicode_CheckExact #define PyObject_Unicode PyObject_Str #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj) #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj) #else #define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj)) #define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj)) #endif #ifndef PySet_CheckExact #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) #endif #if CYTHON_ASSUME_SAFE_MACROS #define __Pyx_PySequence_SIZE(seq) Py_SIZE(seq) #else #define __Pyx_PySequence_SIZE(seq) PySequence_Size(seq) #endif #if PY_MAJOR_VERSION >= 3 #define PyIntObject PyLongObject #define PyInt_Type PyLong_Type #define PyInt_Check(op) PyLong_Check(op) #define PyInt_CheckExact(op) PyLong_CheckExact(op) #define PyInt_FromString PyLong_FromString #define PyInt_FromUnicode PyLong_FromUnicode #define PyInt_FromLong PyLong_FromLong #define PyInt_FromSize_t PyLong_FromSize_t #define PyInt_FromSsize_t PyLong_FromSsize_t #define PyInt_AsLong PyLong_AsLong #define PyInt_AS_LONG PyLong_AS_LONG #define PyInt_AsSsize_t PyLong_AsSsize_t #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask #define PyNumber_Int PyNumber_Long #endif #if PY_MAJOR_VERSION >= 3 #define PyBoolObject PyLongObject #endif #if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY #ifndef PyUnicode_InternFromString #define PyUnicode_InternFromString(s) PyUnicode_FromString(s) #endif #endif #if PY_VERSION_HEX < 0x030200A4 typedef long Py_hash_t; #define __Pyx_PyInt_FromHash_t PyInt_FromLong #define 
__Pyx_PyInt_AsHash_t PyInt_AsLong #else #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyMethod_New(func, self, klass) ((self) ? PyMethod_New(func, self) : (Py_INCREF(func), func)) #else #define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass) #endif #if CYTHON_USE_ASYNC_SLOTS #if PY_VERSION_HEX >= 0x030500B1 #define __Pyx_PyAsyncMethodsStruct PyAsyncMethods #define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async) #else #define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved)) #endif #else #define __Pyx_PyType_AsAsync(obj) NULL #endif #ifndef __Pyx_PyAsyncMethodsStruct typedef struct { unaryfunc am_await; unaryfunc am_aiter; unaryfunc am_anext; } __Pyx_PyAsyncMethodsStruct; #endif #if defined(WIN32) || defined(MS_WINDOWS) #define _USE_MATH_DEFINES #endif #include <math.h> #ifdef NAN #define __PYX_NAN() ((float) NAN) #else static CYTHON_INLINE float __PYX_NAN() { float value; memset(&value, 0xFF, sizeof(value)); return value; } #endif #if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL) #define __Pyx_truncl trunc #else #define __Pyx_truncl truncl #endif #define __PYX_ERR(f_index, lineno, Ln_error) \ { \ __pyx_filename = __pyx_f[f_index]; __pyx_lineno = lineno; __pyx_clineno = __LINE__; goto Ln_error; \ } #ifndef __PYX_EXTERN_C #ifdef __cplusplus #define __PYX_EXTERN_C extern "C" #else #define __PYX_EXTERN_C extern #endif #endif #define __PYX_HAVE__nescient__crypto__chacha #define __PYX_HAVE_API__nescient__crypto__chacha /* Early includes */ #include <stdint.h> #include <string.h> #include <stdlib.h> #include "pythread.h" #include <stdio.h> #include "pystate.h" #ifdef _OPENMP #include <omp.h> #endif /* _OPENMP */ #if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS) #define CYTHON_WITHOUT_ASSERTIONS #endif typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding; const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; #define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0 #define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT 0 #define __PYX_DEFAULT_STRING_ENCODING "" #define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString #define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize #define __Pyx_uchar_cast(c) ((unsigned char)c) #define __Pyx_long_cast(x) ((long)x) #define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\ (sizeof(type) < sizeof(Py_ssize_t)) ||\ (sizeof(type) > sizeof(Py_ssize_t) &&\ likely(v < (type)PY_SSIZE_T_MAX ||\ v == (type)PY_SSIZE_T_MAX) &&\ (!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\ v == (type)PY_SSIZE_T_MIN))) ||\ (sizeof(type) == sizeof(Py_ssize_t) &&\ (is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\ v == (type)PY_SSIZE_T_MAX))) ) #if defined (__cplusplus) && __cplusplus >= 201103L #include <cstdlib> #define __Pyx_sst_abs(value) std::abs(value) #elif SIZEOF_INT >= SIZEOF_SIZE_T #define __Pyx_sst_abs(value) abs(value) #elif SIZEOF_LONG >= SIZEOF_SIZE_T #define __Pyx_sst_abs(value) labs(value) #elif defined (_MSC_VER) #define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value)) #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define __Pyx_sst_abs(value) llabs(value) #elif defined (__GNUC__) #define __Pyx_sst_abs(value) __builtin_llabs(value) #else #define __Pyx_sst_abs(value) ((value<0) ? 
-value : value) #endif static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*); static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length); #define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s)) #define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l) #define __Pyx_PyBytes_FromString PyBytes_FromString #define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*); #if PY_MAJOR_VERSION < 3 #define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString #define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize #else #define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString #define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize #endif #define __Pyx_PyBytes_AsWritableString(s) ((char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsWritableSString(s) ((signed char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsWritableUString(s) ((unsigned char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsString(s) ((const char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsSString(s) ((const signed char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsUString(s) ((const unsigned char*) PyBytes_AS_STRING(s)) #define __Pyx_PyObject_AsWritableString(s) ((char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsWritableSString(s) ((signed char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsSString(s) ((const signed char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s) #define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s) #define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s) #define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s) #define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s) static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) { const Py_UNICODE *u_end = u; while (*u_end++) ; return (size_t)(u_end - u - 1); } #define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u)) #define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode #define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode #define __Pyx_NewRef(obj) (Py_INCREF(obj), obj) #define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None) static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b); static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x); #define __Pyx_PySequence_Tuple(obj)\ (likely(PyTuple_CheckExact(obj)) ? __Pyx_NewRef(obj) : PySequence_Tuple(obj)) static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); #if CYTHON_ASSUME_SAFE_MACROS #define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) #else #define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x) #endif #define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x)) #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Long(x)) #else #define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? 
__Pyx_NewRef(x) : PyNumber_Int(x))
#endif
#define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x))
#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
static int __Pyx_sys_getdefaultencoding_not_ascii;
static int __Pyx_init_sys_getdefaultencoding_params(void) {
    PyObject* sys;
    PyObject* default_encoding = NULL;
    PyObject* ascii_chars_u = NULL;
    PyObject* ascii_chars_b = NULL;
    const char* default_encoding_c;
    sys = PyImport_ImportModule("sys");
    if (!sys) goto bad;
    default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL);
    Py_DECREF(sys);
    if (!default_encoding) goto bad;
    default_encoding_c = PyBytes_AsString(default_encoding);
    if (!default_encoding_c) goto bad;
    if (strcmp(default_encoding_c, "ascii") == 0) {
        __Pyx_sys_getdefaultencoding_not_ascii = 0;
    } else {
        char ascii_chars[128];
        int c;
        for (c = 0; c < 128; c++) {
            ascii_chars[c] = c;
        }
        __Pyx_sys_getdefaultencoding_not_ascii = 1;
        ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL);
        if (!ascii_chars_u) goto bad;
        ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL);
        if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) ||
            memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) {
            PyErr_Format(
                PyExc_ValueError,
                "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.",
                default_encoding_c);
            goto bad;
        }
        Py_DECREF(ascii_chars_u);
        Py_DECREF(ascii_chars_b);
    }
    Py_DECREF(default_encoding);
    return 0;
bad:
    Py_XDECREF(default_encoding);
    Py_XDECREF(ascii_chars_u);
    Py_XDECREF(ascii_chars_b);
    return -1;
}
#endif
#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3
#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL)
#else
#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL)
#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
static char* __PYX_DEFAULT_STRING_ENCODING;
static int __Pyx_init_sys_getdefaultencoding_params(void) {
    PyObject* sys;
    PyObject* default_encoding = NULL;
    char* default_encoding_c;
    sys = PyImport_ImportModule("sys");
    if (!sys) goto bad;
    default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL);
    Py_DECREF(sys);
    if (!default_encoding) goto bad;
    default_encoding_c = PyBytes_AsString(default_encoding);
    if (!default_encoding_c) goto bad;
    /* allocate strlen + 1 bytes so strcpy has room for the terminating NUL */
    __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1);
    if (!__PYX_DEFAULT_STRING_ENCODING) goto bad;
    strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c);
    Py_DECREF(default_encoding);
    return 0;
bad:
    Py_XDECREF(default_encoding);
    return -1;
}
#endif
#endif

/* Test for GCC > 2.95 */
#if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))
  #define likely(x)   __builtin_expect(!!(x), 1)
  #define unlikely(x) __builtin_expect(!!(x), 0)
#else /* !__GNUC__ or GCC < 2.95 */
  #define likely(x)   (x)
  #define unlikely(x) (x)
#endif /* __GNUC__ */
static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; }

static PyObject *__pyx_m = NULL;
static PyObject *__pyx_d;
static PyObject *__pyx_b;
static PyObject *__pyx_cython_runtime = NULL;
static PyObject *__pyx_empty_tuple;
static PyObject *__pyx_empty_bytes;
static PyObject *__pyx_empty_unicode;
static int __pyx_lineno;
static int __pyx_clineno = 0;
static const char *__pyx_cfilenm = __FILE__;
static const char *__pyx_filename;
static const char *__pyx_f[] = {
"nescient/crypto/chacha.pyx", "stringsource", }; /* MemviewSliceStruct.proto */ struct __pyx_memoryview_obj; typedef struct { struct __pyx_memoryview_obj *memview; char *data; Py_ssize_t shape[8]; Py_ssize_t strides[8]; Py_ssize_t suboffsets[8]; } __Pyx_memviewslice; #define __Pyx_MemoryView_Len(m) (m.shape[0]) /* Atomics.proto */ #include <pythread.h> #ifndef CYTHON_ATOMICS #define CYTHON_ATOMICS 1 #endif #define __pyx_atomic_int_type int #if CYTHON_ATOMICS && __GNUC__ >= 4 && (__GNUC_MINOR__ > 1 ||\ (__GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL >= 2)) &&\ !defined(__i386__) #define __pyx_atomic_incr_aligned(value, lock) __sync_fetch_and_add(value, 1) #define __pyx_atomic_decr_aligned(value, lock) __sync_fetch_and_sub(value, 1) #ifdef __PYX_DEBUG_ATOMICS #warning "Using GNU atomics" #endif #elif CYTHON_ATOMICS && defined(_MSC_VER) && 0 #include <Windows.h> #undef __pyx_atomic_int_type #define __pyx_atomic_int_type LONG #define __pyx_atomic_incr_aligned(value, lock) InterlockedIncrement(value) #define __pyx_atomic_decr_aligned(value, lock) InterlockedDecrement(value) #ifdef __PYX_DEBUG_ATOMICS #pragma message ("Using MSVC atomics") #endif #elif CYTHON_ATOMICS && (defined(__ICC) || defined(__INTEL_COMPILER)) && 0 #define __pyx_atomic_incr_aligned(value, lock) _InterlockedIncrement(value) #define __pyx_atomic_decr_aligned(value, lock) _InterlockedDecrement(value) #ifdef __PYX_DEBUG_ATOMICS #warning "Using Intel atomics" #endif #else #undef CYTHON_ATOMICS #define CYTHON_ATOMICS 0 #ifdef __PYX_DEBUG_ATOMICS #warning "Not using atomics" #endif #endif typedef volatile __pyx_atomic_int_type __pyx_atomic_int; #if CYTHON_ATOMICS #define __pyx_add_acquisition_count(memview)\ __pyx_atomic_incr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock) #define __pyx_sub_acquisition_count(memview)\ __pyx_atomic_decr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock) #else #define __pyx_add_acquisition_count(memview)\ __pyx_add_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock) #define __pyx_sub_acquisition_count(memview)\ __pyx_sub_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock) #endif /* NoFastGil.proto */ #define __Pyx_PyGILState_Ensure PyGILState_Ensure #define __Pyx_PyGILState_Release PyGILState_Release #define __Pyx_FastGIL_Remember() #define __Pyx_FastGIL_Forget() #define __Pyx_FastGilFuncInit() /* ForceInitThreads.proto */ #ifndef __PYX_FORCE_INIT_THREADS #define __PYX_FORCE_INIT_THREADS 0 #endif /* BufferFormatStructs.proto */ #define IS_UNSIGNED(type) (((type) -1) > 0) struct __Pyx_StructField_; #define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0) typedef struct { const char* name; struct __Pyx_StructField_* fields; size_t size; size_t arraysize[8]; int ndim; char typegroup; char is_unsigned; int flags; } __Pyx_TypeInfo; typedef struct __Pyx_StructField_ { __Pyx_TypeInfo* type; const char* name; size_t offset; } __Pyx_StructField; typedef struct { __Pyx_StructField* field; size_t parent_offset; } __Pyx_BufFmt_StackElem; typedef struct { __Pyx_StructField root; __Pyx_BufFmt_StackElem* head; size_t fmt_offset; size_t new_count, enc_count; size_t struct_alignment; int is_complex; char enc_type; char new_packmode; char enc_packmode; char is_valid_array; } __Pyx_BufFmt_Context; /*--- Type declarations ---*/ struct __pyx_array_obj; struct __pyx_MemviewEnum_obj; struct __pyx_memoryview_obj; struct __pyx_memoryviewslice_obj; /* "View.MemoryView":104 * * @cname("__pyx_array") * cdef class array: # <<<<<<<<<<<<<< * * cdef: */ struct 
__pyx_array_obj { PyObject_HEAD struct __pyx_vtabstruct_array *__pyx_vtab; char *data; Py_ssize_t len; char *format; int ndim; Py_ssize_t *_shape; Py_ssize_t *_strides; Py_ssize_t itemsize; PyObject *mode; PyObject *_format; void (*callback_free_data)(void *); int free_data; int dtype_is_object; }; /* "View.MemoryView":278 * * @cname('__pyx_MemviewEnum') * cdef class Enum(object): # <<<<<<<<<<<<<< * cdef object name * def __init__(self, name): */ struct __pyx_MemviewEnum_obj { PyObject_HEAD PyObject *name; }; /* "View.MemoryView":329 * * @cname('__pyx_memoryview') * cdef class memoryview(object): # <<<<<<<<<<<<<< * * cdef object obj */ struct __pyx_memoryview_obj { PyObject_HEAD struct __pyx_vtabstruct_memoryview *__pyx_vtab; PyObject *obj; PyObject *_size; PyObject *_array_interface; PyThread_type_lock lock; __pyx_atomic_int acquisition_count[2]; __pyx_atomic_int *acquisition_count_aligned_p; Py_buffer view; int flags; int dtype_is_object; __Pyx_TypeInfo *typeinfo; }; /* "View.MemoryView":960 * * @cname('__pyx_memoryviewslice') * cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<< * "Internal class for passing memoryview slices to Python" * */ struct __pyx_memoryviewslice_obj { struct __pyx_memoryview_obj __pyx_base; __Pyx_memviewslice from_slice; PyObject *from_object; PyObject *(*to_object_func)(char *); int (*to_dtype_func)(char *, PyObject *); }; /* "View.MemoryView":104 * * @cname("__pyx_array") * cdef class array: # <<<<<<<<<<<<<< * * cdef: */ struct __pyx_vtabstruct_array { PyObject *(*get_memview)(struct __pyx_array_obj *); }; static struct __pyx_vtabstruct_array *__pyx_vtabptr_array; /* "View.MemoryView":329 * * @cname('__pyx_memoryview') * cdef class memoryview(object): # <<<<<<<<<<<<<< * * cdef object obj */ struct __pyx_vtabstruct_memoryview { char *(*get_item_pointer)(struct __pyx_memoryview_obj *, PyObject *); PyObject *(*is_slice)(struct __pyx_memoryview_obj *, PyObject *); PyObject *(*setitem_slice_assignment)(struct __pyx_memoryview_obj *, PyObject *, PyObject *); PyObject *(*setitem_slice_assign_scalar)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *); PyObject *(*setitem_indexed)(struct __pyx_memoryview_obj *, PyObject *, PyObject *); PyObject *(*convert_item_to_object)(struct __pyx_memoryview_obj *, char *); PyObject *(*assign_item_from_object)(struct __pyx_memoryview_obj *, char *, PyObject *); }; static struct __pyx_vtabstruct_memoryview *__pyx_vtabptr_memoryview; /* "View.MemoryView":960 * * @cname('__pyx_memoryviewslice') * cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<< * "Internal class for passing memoryview slices to Python" * */ struct __pyx_vtabstruct__memoryviewslice { struct __pyx_vtabstruct_memoryview __pyx_base; }; static struct __pyx_vtabstruct__memoryviewslice *__pyx_vtabptr__memoryviewslice; /* --- Runtime support code (head) --- */ /* Refnanny.proto */ #ifndef CYTHON_REFNANNY #define CYTHON_REFNANNY 0 #endif #if CYTHON_REFNANNY typedef struct { void (*INCREF)(void*, PyObject*, int); void (*DECREF)(void*, PyObject*, int); void (*GOTREF)(void*, PyObject*, int); void (*GIVEREF)(void*, PyObject*, int); void* (*SetupContext)(const char*, int, const char*); void (*FinishContext)(void**); } __Pyx_RefNannyAPIStruct; static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; #ifdef WITH_THREAD #define __Pyx_RefNannySetupContext(name, acquire_gil)\ if (acquire_gil) {\ PyGILState_STATE 
__pyx_gilstate_save = PyGILState_Ensure();\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ PyGILState_Release(__pyx_gilstate_save);\ } else {\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ } #else #define __Pyx_RefNannySetupContext(name, acquire_gil)\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) #endif #define __Pyx_RefNannyFinishContext()\ __Pyx_RefNanny->FinishContext(&__pyx_refnanny) #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) #else #define __Pyx_RefNannyDeclarations #define __Pyx_RefNannySetupContext(name, acquire_gil) #define __Pyx_RefNannyFinishContext() #define __Pyx_INCREF(r) Py_INCREF(r) #define __Pyx_DECREF(r) Py_DECREF(r) #define __Pyx_GOTREF(r) #define __Pyx_GIVEREF(r) #define __Pyx_XINCREF(r) Py_XINCREF(r) #define __Pyx_XDECREF(r) Py_XDECREF(r) #define __Pyx_XGOTREF(r) #define __Pyx_XGIVEREF(r) #endif #define __Pyx_XDECREF_SET(r, v) do {\ PyObject *tmp = (PyObject *) r;\ r = v; __Pyx_XDECREF(tmp);\ } while (0) #define __Pyx_DECREF_SET(r, v) do {\ PyObject *tmp = (PyObject *) r;\ r = v; __Pyx_DECREF(tmp);\ } while (0) #define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0) #define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0) /* PyObjectGetAttrStr.proto */ #if CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name); #else #define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n) #endif /* GetBuiltinName.proto */ static PyObject *__Pyx_GetBuiltinName(PyObject *name); /* RaiseArgTupleInvalid.proto */ static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); /* RaiseDoubleKeywords.proto */ static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); /* ParseKeywords.proto */ static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\ PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\ const char* function_name); /* SliceObject.proto */ static CYTHON_INLINE PyObject* __Pyx_PyObject_GetSlice( PyObject* obj, Py_ssize_t cstart, Py_ssize_t cstop, PyObject** py_start, PyObject** py_stop, PyObject** py_slice, int has_cstart, int has_cstop, int wraparound); /* PyObjectSetAttrStr.proto */ #if CYTHON_USE_TYPE_SLOTS #define __Pyx_PyObject_DelAttrStr(o,n) __Pyx_PyObject_SetAttrStr(o, n, NULL) static CYTHON_INLINE int __Pyx_PyObject_SetAttrStr(PyObject* obj, PyObject* attr_name, PyObject* value); #else #define __Pyx_PyObject_DelAttrStr(o,n) PyObject_DelAttr(o,n) #define __Pyx_PyObject_SetAttrStr(o,n,v) PyObject_SetAttr(o,n,v) #endif /* PyObjectCall.proto */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw); #else 
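/* Non-CPython fallback: route __Pyx_PyObject_Call through the portable
   PyObject_Call API instead of the CPython-specific fast path above. */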
#define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw)
#endif

/* BufferIndexError.proto */
static void __Pyx_RaiseBufferIndexError(int axis);

/* MemviewSliceInit.proto */
#define __Pyx_BUF_MAX_NDIMS 8  /* matches the shape/strides/suboffsets arrays in __Pyx_memviewslice */
#define __Pyx_MEMVIEW_DIRECT   1
#define __Pyx_MEMVIEW_PTR      2
#define __Pyx_MEMVIEW_FULL     4
#define __Pyx_MEMVIEW_CONTIG   8
#define __Pyx_MEMVIEW_STRIDED  16
#define __Pyx_MEMVIEW_FOLLOW   32
#define __Pyx_IS_C_CONTIG 1
#define __Pyx_IS_F_CONTIG 2
static int __Pyx_init_memviewslice(
                struct __pyx_memoryview_obj *memview,
                int ndim,
                __Pyx_memviewslice *memviewslice,
                int memview_is_new_reference);
static CYTHON_INLINE int __pyx_add_acquisition_count_locked(
    __pyx_atomic_int *acquisition_count, PyThread_type_lock lock);
static CYTHON_INLINE int __pyx_sub_acquisition_count_locked(
    __pyx_atomic_int *acquisition_count, PyThread_type_lock lock);
#define __pyx_get_slice_count_pointer(memview) (memview->acquisition_count_aligned_p)
#define __pyx_get_slice_count(memview) (*__pyx_get_slice_count_pointer(memview))
#define __PYX_INC_MEMVIEW(slice, have_gil) __Pyx_INC_MEMVIEW(slice, have_gil, __LINE__)
#define __PYX_XDEC_MEMVIEW(slice, have_gil) __Pyx_XDEC_MEMVIEW(slice, have_gil, __LINE__)
static CYTHON_INLINE void __Pyx_INC_MEMVIEW(__Pyx_memviewslice *, int, int);
static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *, int, int);

/* GetModuleGlobalName.proto */
static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name);

/* PyCFunctionFastCall.proto */
#if CYTHON_FAST_PYCCALL
static CYTHON_INLINE PyObject *__Pyx_PyCFunction_FastCall(PyObject *func, PyObject **args, Py_ssize_t nargs);
#else
#define __Pyx_PyCFunction_FastCall(func, args, nargs)  (assert(0), NULL)
#endif

/* PyFunctionFastCall.proto */
#if CYTHON_FAST_PYCALL
#define __Pyx_PyFunction_FastCall(func, args, nargs)\
    __Pyx_PyFunction_FastCallDict((func), (args), (nargs), NULL)
#if 1 || PY_VERSION_HEX < 0x030600B1
static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, int nargs, PyObject *kwargs);
#else
#define __Pyx_PyFunction_FastCallDict(func, args, nargs, kwargs) _PyFunction_FastCallDict(func, args, nargs, kwargs)
#endif
#endif

/* PyObjectCallMethO.proto */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg);
#endif

/* PyObjectCallOneArg.proto */
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg);

/* PyObjectCallNoArg.proto */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func);
#else
#define __Pyx_PyObject_CallNoArg(func) __Pyx_PyObject_Call(func, __pyx_empty_tuple, NULL)
#endif

/* None.proto */
static CYTHON_INLINE Py_ssize_t __Pyx_div_Py_ssize_t(Py_ssize_t, Py_ssize_t);

/* UnaryNegOverflows.proto */
#define UNARY_NEG_WOULD_OVERFLOW(x)\
        (((x) < 0) & ((unsigned long)(x) == 0-(unsigned long)(x)))

/* None.proto */
static CYTHON_INLINE long __Pyx_div_long(long, long);

/* ArgTypeTest.proto */
#define __Pyx_ArgTypeTest(obj, type, none_allowed, name, exact)\
    ((likely((Py_TYPE(obj) == type) | (none_allowed && (obj == Py_None)))) ?
1 :\ __Pyx__ArgTypeTest(obj, type, name, exact)) static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact); /* PyThreadStateGet.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate; #define __Pyx_PyThreadState_assign __pyx_tstate = __Pyx_PyThreadState_Current; #define __Pyx_PyErr_Occurred() __pyx_tstate->curexc_type #else #define __Pyx_PyThreadState_declare #define __Pyx_PyThreadState_assign #define __Pyx_PyErr_Occurred() PyErr_Occurred() #endif /* PyErrFetchRestore.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL) #define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb) #define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb) #define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb) #define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #if CYTHON_COMPILING_IN_CPYTHON #define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL)) #else #define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) #endif #else #define __Pyx_PyErr_Clear() PyErr_Clear() #define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) #define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb) #define __Pyx_ErrRestoreInState(tstate, type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetchInState(tstate, type, value, tb) PyErr_Fetch(type, value, tb) #define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb) #endif /* RaiseException.proto */ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); /* IncludeStringH.proto */ #include <string.h> /* BytesEquals.proto */ static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals); /* UnicodeEquals.proto */ static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals); /* StrEquals.proto */ #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyString_Equals __Pyx_PyUnicode_Equals #else #define __Pyx_PyString_Equals __Pyx_PyBytes_Equals #endif static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *); /*proto*/ /* GetAttr.proto */ static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *, PyObject *); /* GetItemInt.proto */ #define __Pyx_GetItemInt(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ __Pyx_GetItemInt_Fast(o, (Py_ssize_t)i, is_list, wraparound, boundscheck) :\ (is_list ? 
(PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL) :\ __Pyx_GetItemInt_Generic(o, to_py_func(i)))) #define __Pyx_GetItemInt_List(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ __Pyx_GetItemInt_List_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\ (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL)) static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, int wraparound, int boundscheck); #define __Pyx_GetItemInt_Tuple(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ __Pyx_GetItemInt_Tuple_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\ (PyErr_SetString(PyExc_IndexError, "tuple index out of range"), (PyObject*)NULL)) static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, int wraparound, int boundscheck); static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j); static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list, int wraparound, int boundscheck); /* ObjectGetItem.proto */ #if CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key); #else #define __Pyx_PyObject_GetItem(obj, key) PyObject_GetItem(obj, key) #endif /* decode_c_string_utf16.proto */ static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16(const char *s, Py_ssize_t size, const char *errors) { int byteorder = 0; return PyUnicode_DecodeUTF16(s, size, errors, &byteorder); } static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16LE(const char *s, Py_ssize_t size, const char *errors) { int byteorder = -1; return PyUnicode_DecodeUTF16(s, size, errors, &byteorder); } static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16BE(const char *s, Py_ssize_t size, const char *errors) { int byteorder = 1; return PyUnicode_DecodeUTF16(s, size, errors, &byteorder); } /* decode_c_string.proto */ static CYTHON_INLINE PyObject* __Pyx_decode_c_string( const char* cstring, Py_ssize_t start, Py_ssize_t stop, const char* encoding, const char* errors, PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)); /* PyErrExceptionMatches.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_PyErr_ExceptionMatches(err) __Pyx_PyErr_ExceptionMatchesInState(__pyx_tstate, err) static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err); #else #define __Pyx_PyErr_ExceptionMatches(err) PyErr_ExceptionMatches(err) #endif /* GetAttr3.proto */ static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *, PyObject *, PyObject *); /* RaiseTooManyValuesToUnpack.proto */ static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected); /* RaiseNeedMoreValuesToUnpack.proto */ static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index); /* RaiseNoneIterError.proto */ static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void); /* ExtTypeTest.proto */ static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); /* SaveResetException.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_ExceptionSave(type, value, tb) __Pyx__ExceptionSave(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #define __Pyx_ExceptionReset(type, value, tb) __Pyx__ExceptionReset(__pyx_tstate, type, value, tb) static CYTHON_INLINE void 
__Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); #else #define __Pyx_ExceptionSave(type, value, tb) PyErr_GetExcInfo(type, value, tb) #define __Pyx_ExceptionReset(type, value, tb) PyErr_SetExcInfo(type, value, tb) #endif /* GetException.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_GetException(type, value, tb) __Pyx__GetException(__pyx_tstate, type, value, tb) static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #else static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb); #endif /* SwapException.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_ExceptionSwap(type, value, tb) __Pyx__ExceptionSwap(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #else static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb); #endif /* Import.proto */ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level); /* FastTypeChecks.proto */ #if CYTHON_COMPILING_IN_CPYTHON #define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type) static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b); static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type); static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2); #else #define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) #define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type) #define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2)) #endif #define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception) static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ /* ListCompAppend.proto */ #if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS static CYTHON_INLINE int __Pyx_ListComp_Append(PyObject* list, PyObject* x) { PyListObject* L = (PyListObject*) list; Py_ssize_t len = Py_SIZE(list); if (likely(L->allocated > len)) { Py_INCREF(x); PyList_SET_ITEM(list, len, x); Py_SIZE(list) = len+1; return 0; } return PyList_Append(list, x); } #else #define __Pyx_ListComp_Append(L,x) PyList_Append(L,x) #endif /* PyIntBinop.proto */ #if !CYTHON_COMPILING_IN_PYPY static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, long intval, int inplace); #else #define __Pyx_PyInt_AddObjC(op1, op2, intval, inplace)\ (inplace ? 
PyNumber_InPlaceAdd(op1, op2) : PyNumber_Add(op1, op2)) #endif /* ListExtend.proto */ static CYTHON_INLINE int __Pyx_PyList_Extend(PyObject* L, PyObject* v) { #if CYTHON_COMPILING_IN_CPYTHON PyObject* none = _PyList_Extend((PyListObject*)L, v); if (unlikely(!none)) return -1; Py_DECREF(none); return 0; #else return PyList_SetSlice(L, PY_SSIZE_T_MAX, PY_SSIZE_T_MAX, v); #endif } /* ListAppend.proto */ #if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS static CYTHON_INLINE int __Pyx_PyList_Append(PyObject* list, PyObject* x) { PyListObject* L = (PyListObject*) list; Py_ssize_t len = Py_SIZE(list); if (likely(L->allocated > len) & likely(len > (L->allocated >> 1))) { Py_INCREF(x); PyList_SET_ITEM(list, len, x); Py_SIZE(list) = len+1; return 0; } return PyList_Append(list, x); } #else #define __Pyx_PyList_Append(L,x) PyList_Append(L,x) #endif /* None.proto */ static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname); /* WriteUnraisableException.proto */ static void __Pyx_WriteUnraisable(const char *name, int clineno, int lineno, const char *filename, int full_traceback, int nogil); /* ImportFrom.proto */ static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name); /* HasAttr.proto */ static CYTHON_INLINE int __Pyx_HasAttr(PyObject *, PyObject *); /* PyObject_GenericGetAttrNoDict.proto */ #if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name); #else #define __Pyx_PyObject_GenericGetAttrNoDict PyObject_GenericGetAttr #endif /* PyObject_GenericGetAttr.proto */ #if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name); #else #define __Pyx_PyObject_GenericGetAttr PyObject_GenericGetAttr #endif /* SetVTable.proto */ static int __Pyx_SetVtable(PyObject *dict, void *vtable); /* SetupReduce.proto */ static int __Pyx_setup_reduce(PyObject* type_obj); /* SetNameInClass.proto */ #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 #define __Pyx_SetNameInClass(ns, name, value)\ (likely(PyDict_CheckExact(ns)) ? _PyDict_SetItem_KnownHash(ns, name, value, ((PyASCIIObject *) name)->hash) : PyObject_SetItem(ns, name, value)) #elif CYTHON_COMPILING_IN_CPYTHON #define __Pyx_SetNameInClass(ns, name, value)\ (likely(PyDict_CheckExact(ns)) ? 
PyDict_SetItem(ns, name, value) : PyObject_SetItem(ns, name, value)) #else #define __Pyx_SetNameInClass(ns, name, value) PyObject_SetItem(ns, name, value) #endif /* FetchCommonType.proto */ static PyTypeObject* __Pyx_FetchCommonType(PyTypeObject* type); /* CythonFunction.proto */ #define __Pyx_CyFunction_USED 1 #define __Pyx_CYFUNCTION_STATICMETHOD 0x01 #define __Pyx_CYFUNCTION_CLASSMETHOD 0x02 #define __Pyx_CYFUNCTION_CCLASS 0x04 #define __Pyx_CyFunction_GetClosure(f)\ (((__pyx_CyFunctionObject *) (f))->func_closure) #define __Pyx_CyFunction_GetClassObj(f)\ (((__pyx_CyFunctionObject *) (f))->func_classobj) #define __Pyx_CyFunction_Defaults(type, f)\ ((type *)(((__pyx_CyFunctionObject *) (f))->defaults)) #define __Pyx_CyFunction_SetDefaultsGetter(f, g)\ ((__pyx_CyFunctionObject *) (f))->defaults_getter = (g) typedef struct { PyCFunctionObject func; #if PY_VERSION_HEX < 0x030500A0 PyObject *func_weakreflist; #endif PyObject *func_dict; PyObject *func_name; PyObject *func_qualname; PyObject *func_doc; PyObject *func_globals; PyObject *func_code; PyObject *func_closure; PyObject *func_classobj; void *defaults; int defaults_pyobjects; int flags; PyObject *defaults_tuple; PyObject *defaults_kwdict; PyObject *(*defaults_getter)(PyObject *); PyObject *func_annotations; } __pyx_CyFunctionObject; static PyTypeObject *__pyx_CyFunctionType = 0; #define __Pyx_CyFunction_NewEx(ml, flags, qualname, self, module, globals, code)\ __Pyx_CyFunction_New(__pyx_CyFunctionType, ml, flags, qualname, self, module, globals, code) static PyObject *__Pyx_CyFunction_New(PyTypeObject *, PyMethodDef *ml, int flags, PyObject* qualname, PyObject *self, PyObject *module, PyObject *globals, PyObject* code); static CYTHON_INLINE void *__Pyx_CyFunction_InitDefaults(PyObject *m, size_t size, int pyobjects); static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsTuple(PyObject *m, PyObject *tuple); static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsKwDict(PyObject *m, PyObject *dict); static CYTHON_INLINE void __Pyx_CyFunction_SetAnnotationsDict(PyObject *m, PyObject *dict); static int __pyx_CyFunction_init(void); /* CalculateMetaclass.proto */ static PyObject *__Pyx_CalculateMetaclass(PyTypeObject *metaclass, PyObject *bases); /* Py3ClassCreate.proto */ static PyObject *__Pyx_Py3MetaclassPrepare(PyObject *metaclass, PyObject *bases, PyObject *name, PyObject *qualname, PyObject *mkw, PyObject *modname, PyObject *doc); static PyObject *__Pyx_Py3ClassCreate(PyObject *metaclass, PyObject *name, PyObject *bases, PyObject *dict, PyObject *mkw, int calculate_metaclass, int allow_py2_metaclass); /* CLineInTraceback.proto */ #ifdef CYTHON_CLINE_IN_TRACEBACK #define __Pyx_CLineForTraceback(tstate, c_line) (((CYTHON_CLINE_IN_TRACEBACK)) ? 
c_line : 0) #else static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line); #endif /* CodeObjectCache.proto */ typedef struct { PyCodeObject* code_object; int code_line; } __Pyx_CodeObjectCacheEntry; struct __Pyx_CodeObjectCache { int count; int max_count; __Pyx_CodeObjectCacheEntry* entries; }; static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL}; static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line); static PyCodeObject *__pyx_find_code_object(int code_line); static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object); /* AddTraceback.proto */ static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename); #if PY_MAJOR_VERSION < 3 static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags); static void __Pyx_ReleaseBuffer(Py_buffer *view); #else #define __Pyx_GetBuffer PyObject_GetBuffer #define __Pyx_ReleaseBuffer PyBuffer_Release #endif /* BufferStructDeclare.proto */ typedef struct { Py_ssize_t shape, strides, suboffsets; } __Pyx_Buf_DimInfo; typedef struct { size_t refcount; Py_buffer pybuffer; } __Pyx_Buffer; typedef struct { __Pyx_Buffer *rcbuffer; char *data; __Pyx_Buf_DimInfo diminfo[8]; } __Pyx_LocalBuf_ND; /* MemviewSliceIsContig.proto */ static int __pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs, char order, int ndim); /* OverlappingSlices.proto */ static int __pyx_slices_overlap(__Pyx_memviewslice *slice1, __Pyx_memviewslice *slice2, int ndim, size_t itemsize); /* Capsule.proto */ static CYTHON_INLINE PyObject *__pyx_capsule_create(void *p, const char *sig); /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_uint64_t(uint64_t value); /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_uint32_t(uint32_t value); /* MemviewSliceCopyTemplate.proto */ static __Pyx_memviewslice __pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs, const char *mode, int ndim, size_t sizeof_dtype, int contig_flag, int dtype_is_object); /* CIntFromPy.proto */ static CYTHON_INLINE uint64_t __Pyx_PyInt_As_uint64_t(PyObject *); /* CIntFromPy.proto */ static CYTHON_INLINE uint8_t __Pyx_PyInt_As_uint8_t(PyObject *); /* CIntFromPy.proto */ static CYTHON_INLINE uint32_t __Pyx_PyInt_As_uint32_t(PyObject *); /* CIntFromPy.proto */ static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *); /* CIntFromPy.proto */ static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *); /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value); /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value); /* CIntFromPy.proto */ static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *); /* IsLittleEndian.proto */ static CYTHON_INLINE int __Pyx_Is_Little_Endian(void); /* BufferFormatCheck.proto */ static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts); static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, __Pyx_BufFmt_StackElem* stack, __Pyx_TypeInfo* type); /* TypeInfoCompare.proto */ static int __pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b); /* MemviewSliceValidateAndInit.proto */ static int __Pyx_ValidateAndInit_memviewslice( int *axes_specs, int c_or_f_flag, int buf_flags, int ndim, __Pyx_TypeInfo *dtype, __Pyx_BufFmt_StackElem stack[], __Pyx_memviewslice *memviewslice, PyObject *original_obj); /* ObjectToMemviewSlice.proto */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_ds_nn_uint8_t(PyObject *, int 
writable_flag); /* CheckBinaryVersion.proto */ static int __Pyx_check_binary_version(void); /* FunctionExport.proto */ static int __Pyx_ExportFunction(const char *name, void (*f)(void), const char *sig); /* InitStrings.proto */ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self); /* proto*/ static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto*/ static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj); /* proto*/ static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src); /* proto*/ static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value); /* proto*/ static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto*/ static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/ static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/ static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/ static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/ /* Module declarations from 'libc.stdint' */ /* Module declarations from 'cpython.mem' */ /* Module declarations from 'libc.string' */ /* Module declarations from 'libc.stdlib' */ /* Module declarations from 'nescient.crypto.chacha' */ static PyTypeObject *__pyx_array_type = 0; static PyTypeObject *__pyx_MemviewEnum_type = 0; static PyTypeObject *__pyx_memoryview_type = 0; static PyTypeObject *__pyx_memoryviewslice_type = 0; static int __pyx_v_8nescient_6crypto_6chacha_big_endian; static PyObject *generic = 0; static PyObject *strided = 0; static PyObject *indirect = 0; static PyObject *contiguous = 0; static PyObject *indirect_contiguous = 0; static int __pyx_memoryview_thread_locks_used; static PyThread_type_lock __pyx_memoryview_thread_locks[8]; static void __pyx_f_8nescient_6crypto_6chacha__chacha_task(uint32_t *, uint8_t *, uint32_t *, uint32_t, uint64_t); /*proto*/ static uint32_t *__pyx_f_8nescient_6crypto_6chacha_bytes_to_words(uint8_t *, uint64_t); /*proto*/ static void __pyx_f_8nescient_6crypto_6chacha_byte_swap(uint8_t *, uint64_t); /*proto*/ static void __pyx_f_8nescient_6crypto_6chacha_chacha20(uint8_t *, uint32_t *, uint32_t *, uint32_t); /*proto*/ static struct __pyx_array_obj *__pyx_array_new(PyObject *, Py_ssize_t, char *, char *, char *); /*proto*/ static void *__pyx_align_pointer(void *, size_t); /*proto*/ static PyObject *__pyx_memoryview_new(PyObject *, int, int, __Pyx_TypeInfo *); /*proto*/ static CYTHON_INLINE int __pyx_memoryview_check(PyObject *); /*proto*/ static PyObject *_unellipsify(PyObject *, int); /*proto*/ static PyObject *assert_direct_dimensions(Py_ssize_t *, int); /*proto*/ static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *, PyObject *); /*proto*/ static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *, Py_ssize_t, 
Py_ssize_t, Py_ssize_t, int, int, int *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int, int); /*proto*/ static char *__pyx_pybuffer_index(Py_buffer *, char *, Py_ssize_t, Py_ssize_t); /*proto*/ static int __pyx_memslice_transpose(__Pyx_memviewslice *); /*proto*/ static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice, int, PyObject *(*)(char *), int (*)(char *, PyObject *), int); /*proto*/ static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *); /*proto*/ static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ static Py_ssize_t abs_py_ssize_t(Py_ssize_t); /*proto*/ static char __pyx_get_best_slice_order(__Pyx_memviewslice *, int); /*proto*/ static void _copy_strided_to_strided(char *, Py_ssize_t *, char *, Py_ssize_t *, Py_ssize_t *, Py_ssize_t *, int, size_t); /*proto*/ static void copy_strided_to_strided(__Pyx_memviewslice *, __Pyx_memviewslice *, int, size_t); /*proto*/ static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *, int); /*proto*/ static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *, Py_ssize_t *, Py_ssize_t, int, char); /*proto*/ static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *, __Pyx_memviewslice *, char, int); /*proto*/ static int __pyx_memoryview_err_extents(int, Py_ssize_t, Py_ssize_t); /*proto*/ static int __pyx_memoryview_err_dim(PyObject *, char *, int); /*proto*/ static int __pyx_memoryview_err(PyObject *, char *); /*proto*/ static int __pyx_memoryview_copy_contents(__Pyx_memviewslice, __Pyx_memviewslice, int, int, int); /*proto*/ static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *, int, int); /*proto*/ static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *, int, int, int); /*proto*/ static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/ static void __pyx_memoryview_refcount_objects_in_slice(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/ static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *, int, size_t, void *, int); /*proto*/ static void __pyx_memoryview__slice_assign_scalar(char *, Py_ssize_t *, Py_ssize_t *, int, size_t, void *); /*proto*/ static PyObject *__pyx_unpickle_Enum__set_state(struct __pyx_MemviewEnum_obj *, PyObject *); /*proto*/ static __Pyx_TypeInfo __Pyx_TypeInfo_nn_uint8_t = { "uint8_t", NULL, sizeof(uint8_t), { 0 }, 0, IS_UNSIGNED(uint8_t) ? 
'U' : 'I', IS_UNSIGNED(uint8_t), 0 }; #define __Pyx_MODULE_NAME "nescient.crypto.chacha" extern int __pyx_module_is_main_nescient__crypto__chacha; int __pyx_module_is_main_nescient__crypto__chacha = 0; /* Implementation of 'nescient.crypto.chacha' */ static PyObject *__pyx_builtin_range; static PyObject *__pyx_builtin_ValueError; static PyObject *__pyx_builtin_MemoryError; static PyObject *__pyx_builtin_enumerate; static PyObject *__pyx_builtin_TypeError; static PyObject *__pyx_builtin_Ellipsis; static PyObject *__pyx_builtin_id; static PyObject *__pyx_builtin_IndexError; static const char __pyx_k_O[] = "O"; static const char __pyx_k_c[] = "c"; static const char __pyx_k_i[] = "i"; static const char __pyx_k_l[] = "l"; static const char __pyx_k_id[] = "id"; static const char __pyx_k_big[] = "big"; static const char __pyx_k_doc[] = "__doc__"; static const char __pyx_k_key[] = "key"; static const char __pyx_k_new[] = "__new__"; static const char __pyx_k_obj[] = "obj"; static const char __pyx_k_sha[] = "sha"; static const char __pyx_k_stm[] = "stm"; static const char __pyx_k_sys[] = "sys"; static const char __pyx_k_auth[] = "auth"; static const char __pyx_k_base[] = "base"; static const char __pyx_k_data[] = "data"; static const char __pyx_k_dict[] = "__dict__"; static const char __pyx_k_init[] = "__init__"; static const char __pyx_k_main[] = "__main__"; static const char __pyx_k_mode[] = "mode"; static const char __pyx_k_name[] = "name"; static const char __pyx_k_ndim[] = "ndim"; static const char __pyx_k_pack[] = "pack"; static const char __pyx_k_self[] = "self"; static const char __pyx_k_size[] = "size"; static const char __pyx_k_step[] = "step"; static const char __pyx_k_stop[] = "stop"; static const char __pyx_k_test[] = "__test__"; static const char __pyx_k_time[] = "time"; static const char __pyx_k_view[] = "view"; static const char __pyx_k_ASCII[] = "ASCII"; static const char __pyx_k_class[] = "__class__"; static const char __pyx_k_count[] = "count"; static const char __pyx_k_error[] = "error"; static const char __pyx_k_flags[] = "flags"; static const char __pyx_k_key_w[] = "key_w"; static const char __pyx_k_modes[] = "modes"; static const char __pyx_k_nonce[] = "nonce"; static const char __pyx_k_range[] = "range"; static const char __pyx_k_shape[] = "shape"; static const char __pyx_k_sleep[] = "sleep"; static const char __pyx_k_start[] = "start"; static const char __pyx_k_buffer[] = "buffer"; static const char __pyx_k_ccount[] = "ccount"; static const char __pyx_k_ctypes[] = "ctypes"; static const char __pyx_k_encode[] = "encode"; static const char __pyx_k_format[] = "format"; static const char __pyx_k_import[] = "__import__"; static const char __pyx_k_little[] = "little"; static const char __pyx_k_module[] = "__module__"; static const char __pyx_k_name_2[] = "__name__"; static const char __pyx_k_pickle[] = "pickle"; static const char __pyx_k_reduce[] = "__reduce__"; static const char __pyx_k_struct[] = "struct"; static const char __pyx_k_unpack[] = "unpack"; static const char __pyx_k_update[] = "update"; static const char __pyx_k_Process[] = "Process"; static const char __pyx_k_c_ubyte[] = "c_ubyte"; static const char __pyx_k_fortran[] = "fortran"; static const char __pyx_k_memview[] = "memview"; static const char __pyx_k_nonce_w[] = "nonce_w"; static const char __pyx_k_prepare[] = "__prepare__"; static const char __pyx_k_Ellipsis[] = "Ellipsis"; static const char __pyx_k_RawArray[] = "RawArray"; static const char __pyx_k_getstate[] = "__getstate__"; static const char 
__pyx_k_itemsize[] = "itemsize"; static const char __pyx_k_pyx_type[] = "__pyx_type"; static const char __pyx_k_qualname[] = "__qualname__"; static const char __pyx_k_randbits[] = "randbits"; static const char __pyx_k_setstate[] = "__setstate__"; static const char __pyx_k_to_bytes[] = "to_bytes"; static const char __pyx_k_TypeError[] = "TypeError"; static const char __pyx_k_byteorder[] = "byteorder"; static const char __pyx_k_cpu_count[] = "cpu_count"; static const char __pyx_k_enumerate[] = "enumerate"; static const char __pyx_k_metaclass[] = "__metaclass__"; static const char __pyx_k_n_threads[] = "n_threads"; static const char __pyx_k_pyx_state[] = "__pyx_state"; static const char __pyx_k_reduce_ex[] = "__reduce_ex__"; static const char __pyx_k_IndexError[] = "IndexError"; static const char __pyx_k_ValueError[] = "ValueError"; static const char __pyx_k_chunk_size[] = "chunk_size"; static const char __pyx_k_pyx_result[] = "__pyx_result"; static const char __pyx_k_pyx_vtable[] = "__pyx_vtable__"; static const char __pyx_k_MemoryError[] = "MemoryError"; static const char __pyx_k_PickleError[] = "PickleError"; static const char __pyx_k_chacha_task[] = "_chacha_task"; static const char __pyx_k_pyx_checksum[] = "__pyx_checksum"; static const char __pyx_k_stringsource[] = "stringsource"; static const char __pyx_k_ChaChaCrypter[] = "ChaChaCrypter"; static const char __pyx_k_pyx_getbuffer[] = "__pyx_getbuffer"; static const char __pyx_k_reduce_cython[] = "__reduce_cython__"; static const char __pyx_k_chacha_decrypt[] = "chacha_decrypt"; static const char __pyx_k_chacha_encrypt[] = "chacha_encrypt"; static const char __pyx_k_View_MemoryView[] = "View.MemoryView"; static const char __pyx_k_active_children[] = "active_children"; static const char __pyx_k_allocate_buffer[] = "allocate_buffer"; static const char __pyx_k_dtype_is_object[] = "dtype_is_object"; static const char __pyx_k_multiprocessing[] = "multiprocessing"; static const char __pyx_k_pyx_PickleError[] = "__pyx_PickleError"; static const char __pyx_k_setstate_cython[] = "__setstate_cython__"; static const char __pyx_k_blocks_per_chunk[] = "blocks_per_chunk"; static const char __pyx_k_pyx_unpickle_Enum[] = "__pyx_unpickle_Enum"; static const char __pyx_k_cline_in_traceback[] = "cline_in_traceback"; static const char __pyx_k_strided_and_direct[] = "<strided and direct>"; static const char __pyx_k_force_single_thread[] = "force_single_thread"; static const char __pyx_k_ChaChaCrypter___init[] = "ChaChaCrypter.__init__"; static const char __pyx_k_strided_and_indirect[] = "<strided and indirect>"; static const char __pyx_k_contiguous_and_direct[] = "<contiguous and direct>"; static const char __pyx_k_nescient_crypto_tools[] = "nescient.crypto.tools"; static const char __pyx_k_MemoryView_of_r_object[] = "<MemoryView of %r object>"; static const char __pyx_k_nescient_crypto_chacha[] = "nescient.crypto.chacha"; static const char __pyx_k_MemoryView_of_r_at_0x_x[] = "<MemoryView of %r at 0x%x>"; static const char __pyx_k_contiguous_and_indirect[] = "<contiguous and indirect>"; static const char __pyx_k_Cannot_index_with_type_s[] = "Cannot index with type '%s'"; static const char __pyx_k_Invalid_shape_in_axis_d_d[] = "Invalid shape in axis %d: %d."; static const char __pyx_k_ChaChaCrypter__chacha_task[] = "ChaChaCrypter._chacha_task"; static const char __pyx_k_nescient_crypto_chacha_pyx[] = "nescient/crypto/chacha.pyx"; static const char __pyx_k_itemsize_0_for_cython_array[] = "itemsize <= 0 for cython.array"; static const char 
__pyx_k_ChaChaCrypter_chacha_encrypt[] = "ChaChaCrypter.chacha_encrypt"; static const char __pyx_k_multiprocessing_sharedctypes[] = "multiprocessing.sharedctypes"; static const char __pyx_k_unable_to_allocate_array_data[] = "unable to allocate array data."; static const char __pyx_k_strided_and_direct_or_indirect[] = "<strided and direct or indirect>"; static const char __pyx_k_A_Crypter_object_used_for_encry[] = " A Crypter object used for encrypting or decrypting arbitrary data using the ChaCha stream cipher.\n\n Attributes:\n modes (list): A list of cipher modes supported by the algorithm.\n auth (list): A list of authentication modes supported by the algorithm.\n\n Args:\n key (bytes): The 256 bit key used to encrypt/decrypt data.\n "; static const char __pyx_k_Classes_and_Cython_functions_fo[] = " Classes and (Cython) functions for working with the ChaCha20 stream cipher.\n\nSee RFC 7539 for the cipher specification.\n"; static const char __pyx_k_Buffer_view_does_not_expose_stri[] = "Buffer view does not expose strides"; static const char __pyx_k_Can_only_create_a_buffer_that_is[] = "Can only create a buffer that is contiguous in memory."; static const char __pyx_k_Cannot_assign_to_read_only_memor[] = "Cannot assign to read-only memoryview"; static const char __pyx_k_Cannot_create_writable_memory_vi[] = "Cannot create writable memory view from read-only memoryview"; static const char __pyx_k_Empty_shape_tuple_for_cython_arr[] = "Empty shape tuple for cython.array"; static const char __pyx_k_Incompatible_checksums_s_vs_0xb0[] = "Incompatible checksums (%s vs 0xb068931 = (name))"; static const char __pyx_k_Indirect_dimensions_not_supporte[] = "Indirect dimensions not supported"; static const char __pyx_k_Invalid_mode_expected_c_or_fortr[] = "Invalid mode, expected 'c' or 'fortran', got %s"; static const char __pyx_k_Out_of_bounds_on_buffer_access_a[] = "Out of bounds on buffer access (axis %d)"; static const char __pyx_k_Unable_to_convert_item_to_object[] = "Unable to convert item to object"; static const char __pyx_k_got_differing_extents_in_dimensi[] = "got differing extents in dimension %d (got %d and %d)"; static const char __pyx_k_no_default___reduce___due_to_non[] = "no default __reduce__ due to non-trivial __cinit__"; static const char __pyx_k_unable_to_allocate_shape_and_str[] = "unable to allocate shape and strides."; static PyObject *__pyx_n_s_ASCII; static PyObject *__pyx_kp_s_A_Crypter_object_used_for_encry; static PyObject *__pyx_kp_s_Buffer_view_does_not_expose_stri; static PyObject *__pyx_kp_s_Can_only_create_a_buffer_that_is; static PyObject *__pyx_kp_s_Cannot_assign_to_read_only_memor; static PyObject *__pyx_kp_s_Cannot_create_writable_memory_vi; static PyObject *__pyx_kp_s_Cannot_index_with_type_s; static PyObject *__pyx_n_s_ChaChaCrypter; static PyObject *__pyx_n_s_ChaChaCrypter___init; static PyObject *__pyx_n_s_ChaChaCrypter__chacha_task; static PyObject *__pyx_n_s_ChaChaCrypter_chacha_encrypt; static PyObject *__pyx_n_s_Ellipsis; static PyObject *__pyx_kp_s_Empty_shape_tuple_for_cython_arr; static PyObject *__pyx_kp_s_Incompatible_checksums_s_vs_0xb0; static PyObject *__pyx_n_s_IndexError; static PyObject *__pyx_kp_s_Indirect_dimensions_not_supporte; static PyObject *__pyx_kp_s_Invalid_mode_expected_c_or_fortr; static PyObject *__pyx_kp_s_Invalid_shape_in_axis_d_d; static PyObject *__pyx_n_s_MemoryError; static PyObject *__pyx_kp_s_MemoryView_of_r_at_0x_x; static PyObject *__pyx_kp_s_MemoryView_of_r_object; static PyObject *__pyx_n_b_O; static PyObject 
*__pyx_kp_s_Out_of_bounds_on_buffer_access_a; static PyObject *__pyx_n_s_PickleError; static PyObject *__pyx_n_s_Process; static PyObject *__pyx_n_s_RawArray; static PyObject *__pyx_n_s_TypeError; static PyObject *__pyx_kp_s_Unable_to_convert_item_to_object; static PyObject *__pyx_n_s_ValueError; static PyObject *__pyx_n_s_View_MemoryView; static PyObject *__pyx_n_s_active_children; static PyObject *__pyx_n_s_allocate_buffer; static PyObject *__pyx_n_s_auth; static PyObject *__pyx_n_s_base; static PyObject *__pyx_n_s_big; static PyObject *__pyx_n_s_blocks_per_chunk; static PyObject *__pyx_n_s_buffer; static PyObject *__pyx_n_s_byteorder; static PyObject *__pyx_n_s_c; static PyObject *__pyx_n_u_c; static PyObject *__pyx_n_s_c_ubyte; static PyObject *__pyx_n_s_ccount; static PyObject *__pyx_n_s_chacha_decrypt; static PyObject *__pyx_n_s_chacha_encrypt; static PyObject *__pyx_n_s_chacha_task; static PyObject *__pyx_n_s_chunk_size; static PyObject *__pyx_n_s_class; static PyObject *__pyx_n_s_cline_in_traceback; static PyObject *__pyx_kp_s_contiguous_and_direct; static PyObject *__pyx_kp_s_contiguous_and_indirect; static PyObject *__pyx_n_s_count; static PyObject *__pyx_n_s_cpu_count; static PyObject *__pyx_n_s_ctypes; static PyObject *__pyx_n_s_data; static PyObject *__pyx_n_s_dict; static PyObject *__pyx_n_s_doc; static PyObject *__pyx_n_s_dtype_is_object; static PyObject *__pyx_n_s_encode; static PyObject *__pyx_n_s_enumerate; static PyObject *__pyx_n_s_error; static PyObject *__pyx_n_s_flags; static PyObject *__pyx_n_s_force_single_thread; static PyObject *__pyx_n_s_format; static PyObject *__pyx_n_s_fortran; static PyObject *__pyx_n_u_fortran; static PyObject *__pyx_n_s_getstate; static PyObject *__pyx_kp_s_got_differing_extents_in_dimensi; static PyObject *__pyx_n_s_i; static PyObject *__pyx_n_s_id; static PyObject *__pyx_n_s_import; static PyObject *__pyx_n_s_init; static PyObject *__pyx_n_s_itemsize; static PyObject *__pyx_kp_s_itemsize_0_for_cython_array; static PyObject *__pyx_n_s_key; static PyObject *__pyx_n_s_key_w; static PyObject *__pyx_n_s_l; static PyObject *__pyx_n_s_little; static PyObject *__pyx_n_s_main; static PyObject *__pyx_n_s_memview; static PyObject *__pyx_n_s_metaclass; static PyObject *__pyx_n_s_mode; static PyObject *__pyx_n_s_modes; static PyObject *__pyx_n_s_module; static PyObject *__pyx_n_s_multiprocessing; static PyObject *__pyx_n_s_multiprocessing_sharedctypes; static PyObject *__pyx_n_s_n_threads; static PyObject *__pyx_n_s_name; static PyObject *__pyx_n_s_name_2; static PyObject *__pyx_n_s_ndim; static PyObject *__pyx_n_s_nescient_crypto_chacha; static PyObject *__pyx_kp_s_nescient_crypto_chacha_pyx; static PyObject *__pyx_n_s_nescient_crypto_tools; static PyObject *__pyx_n_s_new; static PyObject *__pyx_kp_s_no_default___reduce___due_to_non; static PyObject *__pyx_n_s_nonce; static PyObject *__pyx_n_s_nonce_w; static PyObject *__pyx_n_s_obj; static PyObject *__pyx_n_s_pack; static PyObject *__pyx_n_s_pickle; static PyObject *__pyx_n_s_prepare; static PyObject *__pyx_n_s_pyx_PickleError; static PyObject *__pyx_n_s_pyx_checksum; static PyObject *__pyx_n_s_pyx_getbuffer; static PyObject *__pyx_n_s_pyx_result; static PyObject *__pyx_n_s_pyx_state; static PyObject *__pyx_n_s_pyx_type; static PyObject *__pyx_n_s_pyx_unpickle_Enum; static PyObject *__pyx_n_s_pyx_vtable; static PyObject *__pyx_n_s_qualname; static PyObject *__pyx_n_s_randbits; static PyObject *__pyx_n_s_range; static PyObject *__pyx_n_s_reduce; static PyObject *__pyx_n_s_reduce_cython; static 
PyObject *__pyx_n_s_reduce_ex; static PyObject *__pyx_n_s_self; static PyObject *__pyx_n_s_setstate; static PyObject *__pyx_n_s_setstate_cython; static PyObject *__pyx_n_s_sha; static PyObject *__pyx_n_s_shape; static PyObject *__pyx_n_s_size; static PyObject *__pyx_n_s_sleep; static PyObject *__pyx_n_s_start; static PyObject *__pyx_n_s_step; static PyObject *__pyx_n_s_stm; static PyObject *__pyx_n_s_stop; static PyObject *__pyx_kp_s_strided_and_direct; static PyObject *__pyx_kp_s_strided_and_direct_or_indirect; static PyObject *__pyx_kp_s_strided_and_indirect; static PyObject *__pyx_kp_s_stringsource; static PyObject *__pyx_n_s_struct; static PyObject *__pyx_n_s_sys; static PyObject *__pyx_n_s_test; static PyObject *__pyx_n_s_time; static PyObject *__pyx_n_s_to_bytes; static PyObject *__pyx_kp_s_unable_to_allocate_array_data; static PyObject *__pyx_kp_s_unable_to_allocate_shape_and_str; static PyObject *__pyx_n_s_unpack; static PyObject *__pyx_n_s_update; static PyObject *__pyx_n_s_view; static PyObject *__pyx_pf_8nescient_6crypto_6chacha_13ChaChaCrypter___init__(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_key); /* proto */ static PyObject *__pyx_pf_8nescient_6crypto_6chacha_13ChaChaCrypter_2_chacha_task(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_data, PyObject *__pyx_v_nonce, PyObject *__pyx_v_count); /* proto */ static PyObject *__pyx_pf_8nescient_6crypto_6chacha_13ChaChaCrypter_4chacha_encrypt(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_data, PyObject *__pyx_v_nonce, PyObject *__pyx_v_count, PyObject *__pyx_v_force_single_thread); /* proto */ static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer); /* proto */ static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self); /* proto */ static Py_ssize_t __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(struct __pyx_array_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr); /* proto */ static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item); /* proto */ static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /* proto */ static PyObject *__pyx_pf___pyx_array___reduce_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_array_2__setstate_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name); /* proto */ static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */ 
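/* Illustrative sketch, not part of the Cython-generated output: the fully
 * unrolled double-round loop in chacha20() below is the standard ChaCha
 * quarter round applied to the 4x4 word state, first down the columns
 * (0,4,8,12), (1,5,9,13), (2,6,10,14), (3,7,11,15) and then along the
 * diagonals (0,5,10,15), (1,6,11,12), (2,7,8,13), (3,4,9,14). A compact
 * reference form of one quarter round, assuming uint32_t operands so the
 * rotations are well-defined (the macro name is illustrative and is unused
 * by the generated code), is: */
#define CHACHA_QR_SKETCH(a, b, c, d)                              \
    do {                                                          \
        (a) += (b); (d) ^= (a); (d) = ((d) << 16) | ((d) >> 16);  \
        (c) += (d); (b) ^= (c); (b) = ((b) << 12) | ((b) >> 20);  \
        (a) += (b); (d) ^= (a); (d) = ((d) <<  8) | ((d) >> 24);  \
        (c) += (d); (b) ^= (c); (b) = ((b) <<  7) | ((b) >> 25);  \
    } while (0)
/* Ten iterations of the column+diagonal pattern give the 20 rounds of
 * ChaCha20 (RFC 7539); the unrolled form below trades code size for speed. */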
static PyObject *__pyx_pf___pyx_MemviewEnum___reduce_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_MemviewEnum_2__setstate_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v___pyx_state); /* proto */ static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object); /* proto */ static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto */ static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto */ static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_memoryview___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject 
*__pyx_pf___pyx_memoryview_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_memoryviewslice___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_memoryviewslice_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state); /* proto */ static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_int_0; static PyObject *__pyx_int_1; static PyObject *__pyx_int_12; static PyObject *__pyx_int_96; static PyObject *__pyx_int_184977713; static PyObject *__pyx_int_neg_1; static PyObject *__pyx_slice_; static PyObject *__pyx_tuple__2; static PyObject *__pyx_tuple__3; static PyObject *__pyx_tuple__4; static PyObject *__pyx_tuple__5; static PyObject *__pyx_tuple__6; static PyObject *__pyx_tuple__7; static PyObject *__pyx_tuple__8; static PyObject *__pyx_tuple__9; static PyObject *__pyx_slice__20; static PyObject *__pyx_slice__21; static PyObject *__pyx_slice__22; static PyObject *__pyx_tuple__10; static PyObject *__pyx_tuple__11; static PyObject *__pyx_tuple__12; static PyObject *__pyx_tuple__13; static PyObject *__pyx_tuple__14; static PyObject *__pyx_tuple__15; static PyObject *__pyx_tuple__16; static PyObject *__pyx_tuple__17; static PyObject *__pyx_tuple__18; static PyObject *__pyx_tuple__19; static PyObject *__pyx_tuple__23; static PyObject *__pyx_tuple__24; static PyObject *__pyx_tuple__25; static PyObject *__pyx_tuple__26; static PyObject *__pyx_tuple__28; static PyObject *__pyx_tuple__30; static PyObject *__pyx_tuple__31; static PyObject *__pyx_tuple__33; static PyObject *__pyx_tuple__34; static PyObject *__pyx_tuple__35; static PyObject *__pyx_tuple__36; static PyObject *__pyx_tuple__37; static PyObject *__pyx_tuple__38; static PyObject *__pyx_tuple__39; static PyObject *__pyx_codeobj__27; static PyObject *__pyx_codeobj__29; static PyObject *__pyx_codeobj__32; static PyObject *__pyx_codeobj__40; /* Late includes */ /* "nescient/crypto/chacha.pyx":25 * * # Little endian bytes to 32-bit words conversion * cdef uint32_t * bytes_to_words(uint8_t * b, uint64_t l): # <<<<<<<<<<<<<< * cdef uint32_t * w = <uint32_t *>PyMem_Malloc(l) * cdef uint64_t i */ static uint32_t *__pyx_f_8nescient_6crypto_6chacha_bytes_to_words(uint8_t *__pyx_v_b, uint64_t __pyx_v_l) { uint32_t *__pyx_v_w; uint64_t __pyx_v_i; uint32_t *__pyx_r; __Pyx_RefNannyDeclarations uint64_t __pyx_t_1; uint64_t __pyx_t_2; uint64_t __pyx_t_3; __Pyx_RefNannySetupContext("bytes_to_words", 0); /* "nescient/crypto/chacha.pyx":26 * # Little endian bytes to 32-bit 
words conversion * cdef uint32_t * bytes_to_words(uint8_t * b, uint64_t l): * cdef uint32_t * w = <uint32_t *>PyMem_Malloc(l) # <<<<<<<<<<<<<< * cdef uint64_t i * for i in range(0, l, 4): */ __pyx_v_w = ((uint32_t *)PyMem_Malloc(__pyx_v_l)); /* "nescient/crypto/chacha.pyx":28 * cdef uint32_t * w = <uint32_t *>PyMem_Malloc(l) * cdef uint64_t i * for i in range(0, l, 4): # <<<<<<<<<<<<<< * w[i>>2] = b[i] | (b[i+1] << 8) | (b[i+2] << 16) | (b[i+3] << 24) * return w */ __pyx_t_1 = __pyx_v_l; __pyx_t_2 = __pyx_t_1; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=4) { __pyx_v_i = __pyx_t_3; /* "nescient/crypto/chacha.pyx":29 * cdef uint64_t i * for i in range(0, l, 4): * w[i>>2] = b[i] | (b[i+1] << 8) | (b[i+2] << 16) | (b[i+3] << 24) # <<<<<<<<<<<<<< * return w * */ (__pyx_v_w[(__pyx_v_i >> 2)]) = ((((__pyx_v_b[__pyx_v_i]) | ((__pyx_v_b[(__pyx_v_i + 1)]) << 8)) | ((__pyx_v_b[(__pyx_v_i + 2)]) << 16)) | ((__pyx_v_b[(__pyx_v_i + 3)]) << 24)); } /* "nescient/crypto/chacha.pyx":30 * for i in range(0, l, 4): * w[i>>2] = b[i] | (b[i+1] << 8) | (b[i+2] << 16) | (b[i+3] << 24) * return w # <<<<<<<<<<<<<< * * # # Little endian 32-bit words to bytes conversion */ __pyx_r = __pyx_v_w; goto __pyx_L0; /* "nescient/crypto/chacha.pyx":25 * * # Little endian bytes to 32-bit words conversion * cdef uint32_t * bytes_to_words(uint8_t * b, uint64_t l): # <<<<<<<<<<<<<< * cdef uint32_t * w = <uint32_t *>PyMem_Malloc(l) * cdef uint64_t i */ /* function exit code */ __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "nescient/crypto/chacha.pyx":49 * * # byteswap * cdef void byte_swap(uint8_t * b, uint64_t l) nogil: # <<<<<<<<<<<<<< * cdef uint64_t i * cdef uint8_t temp */ static void __pyx_f_8nescient_6crypto_6chacha_byte_swap(uint8_t *__pyx_v_b, uint64_t __pyx_v_l) { uint64_t __pyx_v_i; uint8_t __pyx_v_temp; uint64_t __pyx_t_1; uint64_t __pyx_t_2; uint64_t __pyx_t_3; /* "nescient/crypto/chacha.pyx":52 * cdef uint64_t i * cdef uint8_t temp * for i in range(0, l, 4): # <<<<<<<<<<<<<< * temp = b[i] * b[i] = b[i+3] */ __pyx_t_1 = __pyx_v_l; __pyx_t_2 = __pyx_t_1; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=4) { __pyx_v_i = __pyx_t_3; /* "nescient/crypto/chacha.pyx":53 * cdef uint8_t temp * for i in range(0, l, 4): * temp = b[i] # <<<<<<<<<<<<<< * b[i] = b[i+3] * b[i+3] = temp */ __pyx_v_temp = (__pyx_v_b[__pyx_v_i]); /* "nescient/crypto/chacha.pyx":54 * for i in range(0, l, 4): * temp = b[i] * b[i] = b[i+3] # <<<<<<<<<<<<<< * b[i+3] = temp * temp = b[i+1] */ (__pyx_v_b[__pyx_v_i]) = (__pyx_v_b[(__pyx_v_i + 3)]); /* "nescient/crypto/chacha.pyx":55 * temp = b[i] * b[i] = b[i+3] * b[i+3] = temp # <<<<<<<<<<<<<< * temp = b[i+1] * b[i+1] = b[i+2] */ (__pyx_v_b[(__pyx_v_i + 3)]) = __pyx_v_temp; /* "nescient/crypto/chacha.pyx":56 * b[i] = b[i+3] * b[i+3] = temp * temp = b[i+1] # <<<<<<<<<<<<<< * b[i+1] = b[i+2] * b[i+2] = temp */ __pyx_v_temp = (__pyx_v_b[(__pyx_v_i + 1)]); /* "nescient/crypto/chacha.pyx":57 * b[i+3] = temp * temp = b[i+1] * b[i+1] = b[i+2] # <<<<<<<<<<<<<< * b[i+2] = temp * return */ (__pyx_v_b[(__pyx_v_i + 1)]) = (__pyx_v_b[(__pyx_v_i + 2)]); /* "nescient/crypto/chacha.pyx":58 * temp = b[i+1] * b[i+1] = b[i+2] * b[i+2] = temp # <<<<<<<<<<<<<< * return * */ (__pyx_v_b[(__pyx_v_i + 2)]) = __pyx_v_temp; } /* "nescient/crypto/chacha.pyx":59 * b[i+1] = b[i+2] * b[i+2] = temp * return # <<<<<<<<<<<<<< * * */ goto __pyx_L0; /* "nescient/crypto/chacha.pyx":49 * * # byteswap * cdef void byte_swap(uint8_t * b, uint64_t l) nogil: # <<<<<<<<<<<<<< * cdef uint64_t i * cdef uint8_t temp */ /* 
function exit code */ __pyx_L0:; } /* "nescient/crypto/chacha.pyx":76 * * # Generates 64 keystream bytes from a 256-bit key, a 96-bit nonce, and a 32-bit counter * cdef void chacha20(uint8_t * key_stream, uint32_t * key, uint32_t * nonce, uint32_t count) nogil: # <<<<<<<<<<<<<< * cdef uint32_t state[16] * cdef uint32_t start_state[16] */ static void __pyx_f_8nescient_6crypto_6chacha_chacha20(uint8_t *__pyx_v_key_stream, uint32_t *__pyx_v_key, uint32_t *__pyx_v_nonce, uint32_t __pyx_v_count) { uint32_t __pyx_v_state[16]; uint32_t __pyx_v_start_state[16]; uint8_t __pyx_v_i; uint8_t *__pyx_v_b; uint8_t __pyx_t_1; uint8_t __pyx_t_2; int __pyx_t_3; /* "nescient/crypto/chacha.pyx":81 * cdef uint8_t i * # First four words are constants * state[0] = 0x61707865; state[1] = 0x3320646e; state[2] = 0x79622d32; state[3] = 0x6b206574 # <<<<<<<<<<<<<< * # Words 4-11 are the key * state[4] = key[0]; state[5] = key[1]; state[6] = key[2]; state[7] = key[3]; state[8] = key[4]; state[9] = key[5] */ (__pyx_v_state[0]) = 0x61707865; (__pyx_v_state[1]) = 0x3320646e; (__pyx_v_state[2]) = 0x79622d32; (__pyx_v_state[3]) = 0x6b206574; /* "nescient/crypto/chacha.pyx":83 * state[0] = 0x61707865; state[1] = 0x3320646e; state[2] = 0x79622d32; state[3] = 0x6b206574 * # Words 4-11 are the key * state[4] = key[0]; state[5] = key[1]; state[6] = key[2]; state[7] = key[3]; state[8] = key[4]; state[9] = key[5] # <<<<<<<<<<<<<< * state[10] = key[6]; state[11] = key[7] * # Word 12 is the count */ (__pyx_v_state[4]) = (__pyx_v_key[0]); (__pyx_v_state[5]) = (__pyx_v_key[1]); (__pyx_v_state[6]) = (__pyx_v_key[2]); (__pyx_v_state[7]) = (__pyx_v_key[3]); (__pyx_v_state[8]) = (__pyx_v_key[4]); (__pyx_v_state[9]) = (__pyx_v_key[5]); /* "nescient/crypto/chacha.pyx":84 * # Words 4-11 are the key * state[4] = key[0]; state[5] = key[1]; state[6] = key[2]; state[7] = key[3]; state[8] = key[4]; state[9] = key[5] * state[10] = key[6]; state[11] = key[7] # <<<<<<<<<<<<<< * # Word 12 is the count * state[12] = count */ (__pyx_v_state[10]) = (__pyx_v_key[6]); (__pyx_v_state[11]) = (__pyx_v_key[7]); /* "nescient/crypto/chacha.pyx":86 * state[10] = key[6]; state[11] = key[7] * # Word 12 is the count * state[12] = count # <<<<<<<<<<<<<< * # Words 13-15 are the nonce * state[13] = nonce[0]; state[14] = nonce[1]; state[15] = nonce[2] */ (__pyx_v_state[12]) = __pyx_v_count; /* "nescient/crypto/chacha.pyx":88 * state[12] = count * # Words 13-15 are the nonce * state[13] = nonce[0]; state[14] = nonce[1]; state[15] = nonce[2] # <<<<<<<<<<<<<< * # Copy the state into the start state for later * for i in range(16): */ (__pyx_v_state[13]) = (__pyx_v_nonce[0]); (__pyx_v_state[14]) = (__pyx_v_nonce[1]); (__pyx_v_state[15]) = (__pyx_v_nonce[2]); /* "nescient/crypto/chacha.pyx":90 * state[13] = nonce[0]; state[14] = nonce[1]; state[15] = nonce[2] * # Copy the state into the start state for later * for i in range(16): # <<<<<<<<<<<<<< * start_state[i] = state[i] * # Perform the ChaCha20 rounds */ for (__pyx_t_1 = 0; __pyx_t_1 < 16; __pyx_t_1+=1) { __pyx_v_i = __pyx_t_1; /* "nescient/crypto/chacha.pyx":91 * # Copy the state into the start state for later * for i in range(16): * start_state[i] = state[i] # <<<<<<<<<<<<<< * # Perform the ChaCha20 rounds * for i in range(10): */ (__pyx_v_start_state[__pyx_v_i]) = (__pyx_v_state[__pyx_v_i]); } /* "nescient/crypto/chacha.pyx":93 * start_state[i] = state[i] * # Perform the ChaCha20 rounds * for i in range(10): # <<<<<<<<<<<<<< * # Quarter round 0, 4, 8, 12 * state[0] = state[0] + state[4]; state[12] = state[12] ^ 
state[0]; state[12] = (state[12] << 16) | (state[12] >> 16) */ for (__pyx_t_1 = 0; __pyx_t_1 < 10; __pyx_t_1+=1) { __pyx_v_i = __pyx_t_1; /* "nescient/crypto/chacha.pyx":95 * for i in range(10): * # Quarter round 0, 4, 8, 12 * state[0] = state[0] + state[4]; state[12] = state[12] ^ state[0]; state[12] = (state[12] << 16) | (state[12] >> 16) # <<<<<<<<<<<<<< * state[8] = state[8] + state[12]; state[4] = state[4] ^ state[8]; state[4] = (state[4] << 12) | (state[4] >> 20) * state[0] = state[0] + state[4]; state[12] = state[12] ^ state[0]; state[12] = (state[12] << 8) | (state[12] >> 24) */ (__pyx_v_state[0]) = ((__pyx_v_state[0]) + (__pyx_v_state[4])); (__pyx_v_state[12]) = ((__pyx_v_state[12]) ^ (__pyx_v_state[0])); (__pyx_v_state[12]) = (((__pyx_v_state[12]) << 16) | ((__pyx_v_state[12]) >> 16)); /* "nescient/crypto/chacha.pyx":96 * # Quarter round 0, 4, 8, 12 * state[0] = state[0] + state[4]; state[12] = state[12] ^ state[0]; state[12] = (state[12] << 16) | (state[12] >> 16) * state[8] = state[8] + state[12]; state[4] = state[4] ^ state[8]; state[4] = (state[4] << 12) | (state[4] >> 20) # <<<<<<<<<<<<<< * state[0] = state[0] + state[4]; state[12] = state[12] ^ state[0]; state[12] = (state[12] << 8) | (state[12] >> 24) * state[8] = state[8] + state[12]; state[4] = state[4] ^ state[8]; state[4] = (state[4] << 7) | (state[4] >> 25) */ (__pyx_v_state[8]) = ((__pyx_v_state[8]) + (__pyx_v_state[12])); (__pyx_v_state[4]) = ((__pyx_v_state[4]) ^ (__pyx_v_state[8])); (__pyx_v_state[4]) = (((__pyx_v_state[4]) << 12) | ((__pyx_v_state[4]) >> 20)); /* "nescient/crypto/chacha.pyx":97 * state[0] = state[0] + state[4]; state[12] = state[12] ^ state[0]; state[12] = (state[12] << 16) | (state[12] >> 16) * state[8] = state[8] + state[12]; state[4] = state[4] ^ state[8]; state[4] = (state[4] << 12) | (state[4] >> 20) * state[0] = state[0] + state[4]; state[12] = state[12] ^ state[0]; state[12] = (state[12] << 8) | (state[12] >> 24) # <<<<<<<<<<<<<< * state[8] = state[8] + state[12]; state[4] = state[4] ^ state[8]; state[4] = (state[4] << 7) | (state[4] >> 25) * # Quarter round 1, 5, 9, 13 */ (__pyx_v_state[0]) = ((__pyx_v_state[0]) + (__pyx_v_state[4])); (__pyx_v_state[12]) = ((__pyx_v_state[12]) ^ (__pyx_v_state[0])); (__pyx_v_state[12]) = (((__pyx_v_state[12]) << 8) | ((__pyx_v_state[12]) >> 24)); /* "nescient/crypto/chacha.pyx":98 * state[8] = state[8] + state[12]; state[4] = state[4] ^ state[8]; state[4] = (state[4] << 12) | (state[4] >> 20) * state[0] = state[0] + state[4]; state[12] = state[12] ^ state[0]; state[12] = (state[12] << 8) | (state[12] >> 24) * state[8] = state[8] + state[12]; state[4] = state[4] ^ state[8]; state[4] = (state[4] << 7) | (state[4] >> 25) # <<<<<<<<<<<<<< * # Quarter round 1, 5, 9, 13 * state[1] = state[1] + state[5]; state[13] = state[13] ^ state[1]; state[13] = (state[13] << 16) | (state[13] >> 16) */ (__pyx_v_state[8]) = ((__pyx_v_state[8]) + (__pyx_v_state[12])); (__pyx_v_state[4]) = ((__pyx_v_state[4]) ^ (__pyx_v_state[8])); (__pyx_v_state[4]) = (((__pyx_v_state[4]) << 7) | ((__pyx_v_state[4]) >> 25)); /* "nescient/crypto/chacha.pyx":100 * state[8] = state[8] + state[12]; state[4] = state[4] ^ state[8]; state[4] = (state[4] << 7) | (state[4] >> 25) * # Quarter round 1, 5, 9, 13 * state[1] = state[1] + state[5]; state[13] = state[13] ^ state[1]; state[13] = (state[13] << 16) | (state[13] >> 16) # <<<<<<<<<<<<<< * state[9] = state[9] + state[13]; state[5] = state[5] ^ state[9]; state[5] = (state[5] << 12) | (state[5] >> 20) * state[1] = state[1] + state[5]; state[13] = 
state[13] ^ state[1]; state[13] = (state[13] << 8) | (state[13] >> 24) */ (__pyx_v_state[1]) = ((__pyx_v_state[1]) + (__pyx_v_state[5])); (__pyx_v_state[13]) = ((__pyx_v_state[13]) ^ (__pyx_v_state[1])); (__pyx_v_state[13]) = (((__pyx_v_state[13]) << 16) | ((__pyx_v_state[13]) >> 16)); /* "nescient/crypto/chacha.pyx":101 * # Quarter round 1, 5, 9, 13 * state[1] = state[1] + state[5]; state[13] = state[13] ^ state[1]; state[13] = (state[13] << 16) | (state[13] >> 16) * state[9] = state[9] + state[13]; state[5] = state[5] ^ state[9]; state[5] = (state[5] << 12) | (state[5] >> 20) # <<<<<<<<<<<<<< * state[1] = state[1] + state[5]; state[13] = state[13] ^ state[1]; state[13] = (state[13] << 8) | (state[13] >> 24) * state[9] = state[9] + state[13]; state[5] = state[5] ^ state[9]; state[5] = (state[5] << 7) | (state[5] >> 25) */ (__pyx_v_state[9]) = ((__pyx_v_state[9]) + (__pyx_v_state[13])); (__pyx_v_state[5]) = ((__pyx_v_state[5]) ^ (__pyx_v_state[9])); (__pyx_v_state[5]) = (((__pyx_v_state[5]) << 12) | ((__pyx_v_state[5]) >> 20)); /* "nescient/crypto/chacha.pyx":102 * state[1] = state[1] + state[5]; state[13] = state[13] ^ state[1]; state[13] = (state[13] << 16) | (state[13] >> 16) * state[9] = state[9] + state[13]; state[5] = state[5] ^ state[9]; state[5] = (state[5] << 12) | (state[5] >> 20) * state[1] = state[1] + state[5]; state[13] = state[13] ^ state[1]; state[13] = (state[13] << 8) | (state[13] >> 24) # <<<<<<<<<<<<<< * state[9] = state[9] + state[13]; state[5] = state[5] ^ state[9]; state[5] = (state[5] << 7) | (state[5] >> 25) * # Quarter round 2, 6, 10, 14 */ (__pyx_v_state[1]) = ((__pyx_v_state[1]) + (__pyx_v_state[5])); (__pyx_v_state[13]) = ((__pyx_v_state[13]) ^ (__pyx_v_state[1])); (__pyx_v_state[13]) = (((__pyx_v_state[13]) << 8) | ((__pyx_v_state[13]) >> 24)); /* "nescient/crypto/chacha.pyx":103 * state[9] = state[9] + state[13]; state[5] = state[5] ^ state[9]; state[5] = (state[5] << 12) | (state[5] >> 20) * state[1] = state[1] + state[5]; state[13] = state[13] ^ state[1]; state[13] = (state[13] << 8) | (state[13] >> 24) * state[9] = state[9] + state[13]; state[5] = state[5] ^ state[9]; state[5] = (state[5] << 7) | (state[5] >> 25) # <<<<<<<<<<<<<< * # Quarter round 2, 6, 10, 14 * state[2] = state[2] + state[6]; state[14] = state[14] ^ state[2]; state[14] = (state[14] << 16) | (state[14] >> 16) */ (__pyx_v_state[9]) = ((__pyx_v_state[9]) + (__pyx_v_state[13])); (__pyx_v_state[5]) = ((__pyx_v_state[5]) ^ (__pyx_v_state[9])); (__pyx_v_state[5]) = (((__pyx_v_state[5]) << 7) | ((__pyx_v_state[5]) >> 25)); /* "nescient/crypto/chacha.pyx":105 * state[9] = state[9] + state[13]; state[5] = state[5] ^ state[9]; state[5] = (state[5] << 7) | (state[5] >> 25) * # Quarter round 2, 6, 10, 14 * state[2] = state[2] + state[6]; state[14] = state[14] ^ state[2]; state[14] = (state[14] << 16) | (state[14] >> 16) # <<<<<<<<<<<<<< * state[10] = state[10] + state[14]; state[6] = state[6] ^ state[10]; state[6] = (state[6] << 12) | (state[6] >> 20) * state[2] = state[2] + state[6]; state[14] = state[14] ^ state[2]; state[14] = (state[14] << 8) | (state[14] >> 24) */ (__pyx_v_state[2]) = ((__pyx_v_state[2]) + (__pyx_v_state[6])); (__pyx_v_state[14]) = ((__pyx_v_state[14]) ^ (__pyx_v_state[2])); (__pyx_v_state[14]) = (((__pyx_v_state[14]) << 16) | ((__pyx_v_state[14]) >> 16)); /* "nescient/crypto/chacha.pyx":106 * # Quarter round 2, 6, 10, 14 * state[2] = state[2] + state[6]; state[14] = state[14] ^ state[2]; state[14] = (state[14] << 16) | (state[14] >> 16) * state[10] = state[10] + state[14]; 
state[6] = state[6] ^ state[10]; state[6] = (state[6] << 12) | (state[6] >> 20) # <<<<<<<<<<<<<< * state[2] = state[2] + state[6]; state[14] = state[14] ^ state[2]; state[14] = (state[14] << 8) | (state[14] >> 24) * state[10] = state[10] + state[14]; state[6] = state[6] ^ state[10]; state[6] = (state[6] << 7) | (state[6] >> 25) */ (__pyx_v_state[10]) = ((__pyx_v_state[10]) + (__pyx_v_state[14])); (__pyx_v_state[6]) = ((__pyx_v_state[6]) ^ (__pyx_v_state[10])); (__pyx_v_state[6]) = (((__pyx_v_state[6]) << 12) | ((__pyx_v_state[6]) >> 20)); /* "nescient/crypto/chacha.pyx":107 * state[2] = state[2] + state[6]; state[14] = state[14] ^ state[2]; state[14] = (state[14] << 16) | (state[14] >> 16) * state[10] = state[10] + state[14]; state[6] = state[6] ^ state[10]; state[6] = (state[6] << 12) | (state[6] >> 20) * state[2] = state[2] + state[6]; state[14] = state[14] ^ state[2]; state[14] = (state[14] << 8) | (state[14] >> 24) # <<<<<<<<<<<<<< * state[10] = state[10] + state[14]; state[6] = state[6] ^ state[10]; state[6] = (state[6] << 7) | (state[6] >> 25) * # Quarter round 3, 7, 11, 15 */ (__pyx_v_state[2]) = ((__pyx_v_state[2]) + (__pyx_v_state[6])); (__pyx_v_state[14]) = ((__pyx_v_state[14]) ^ (__pyx_v_state[2])); (__pyx_v_state[14]) = (((__pyx_v_state[14]) << 8) | ((__pyx_v_state[14]) >> 24)); /* "nescient/crypto/chacha.pyx":108 * state[10] = state[10] + state[14]; state[6] = state[6] ^ state[10]; state[6] = (state[6] << 12) | (state[6] >> 20) * state[2] = state[2] + state[6]; state[14] = state[14] ^ state[2]; state[14] = (state[14] << 8) | (state[14] >> 24) * state[10] = state[10] + state[14]; state[6] = state[6] ^ state[10]; state[6] = (state[6] << 7) | (state[6] >> 25) # <<<<<<<<<<<<<< * # Quarter round 3, 7, 11, 15 * state[3] = state[3] + state[7]; state[15] = state[15] ^ state[3]; state[15] = (state[15] << 16) | (state[15] >> 16) */ (__pyx_v_state[10]) = ((__pyx_v_state[10]) + (__pyx_v_state[14])); (__pyx_v_state[6]) = ((__pyx_v_state[6]) ^ (__pyx_v_state[10])); (__pyx_v_state[6]) = (((__pyx_v_state[6]) << 7) | ((__pyx_v_state[6]) >> 25)); /* "nescient/crypto/chacha.pyx":110 * state[10] = state[10] + state[14]; state[6] = state[6] ^ state[10]; state[6] = (state[6] << 7) | (state[6] >> 25) * # Quarter round 3, 7, 11, 15 * state[3] = state[3] + state[7]; state[15] = state[15] ^ state[3]; state[15] = (state[15] << 16) | (state[15] >> 16) # <<<<<<<<<<<<<< * state[11] = state[11] + state[15]; state[7] = state[7] ^ state[11]; state[7] = (state[7] << 12) | (state[7] >> 20) * state[3] = state[3] + state[7]; state[15] = state[15] ^ state[3]; state[15] = (state[15] << 8) | (state[15] >> 24) */ (__pyx_v_state[3]) = ((__pyx_v_state[3]) + (__pyx_v_state[7])); (__pyx_v_state[15]) = ((__pyx_v_state[15]) ^ (__pyx_v_state[3])); (__pyx_v_state[15]) = (((__pyx_v_state[15]) << 16) | ((__pyx_v_state[15]) >> 16)); /* "nescient/crypto/chacha.pyx":111 * # Quarter round 3, 7, 11, 15 * state[3] = state[3] + state[7]; state[15] = state[15] ^ state[3]; state[15] = (state[15] << 16) | (state[15] >> 16) * state[11] = state[11] + state[15]; state[7] = state[7] ^ state[11]; state[7] = (state[7] << 12) | (state[7] >> 20) # <<<<<<<<<<<<<< * state[3] = state[3] + state[7]; state[15] = state[15] ^ state[3]; state[15] = (state[15] << 8) | (state[15] >> 24) * state[11] = state[11] + state[15]; state[7] = state[7] ^ state[11]; state[7] = (state[7] << 7) | (state[7] >> 25) */ (__pyx_v_state[11]) = ((__pyx_v_state[11]) + (__pyx_v_state[15])); (__pyx_v_state[7]) = ((__pyx_v_state[7]) ^ (__pyx_v_state[11])); (__pyx_v_state[7]) = 
(((__pyx_v_state[7]) << 12) | ((__pyx_v_state[7]) >> 20)); /* "nescient/crypto/chacha.pyx":112 * state[3] = state[3] + state[7]; state[15] = state[15] ^ state[3]; state[15] = (state[15] << 16) | (state[15] >> 16) * state[11] = state[11] + state[15]; state[7] = state[7] ^ state[11]; state[7] = (state[7] << 12) | (state[7] >> 20) * state[3] = state[3] + state[7]; state[15] = state[15] ^ state[3]; state[15] = (state[15] << 8) | (state[15] >> 24) # <<<<<<<<<<<<<< * state[11] = state[11] + state[15]; state[7] = state[7] ^ state[11]; state[7] = (state[7] << 7) | (state[7] >> 25) * # Quarter round 0, 5, 10, 15 */ (__pyx_v_state[3]) = ((__pyx_v_state[3]) + (__pyx_v_state[7])); (__pyx_v_state[15]) = ((__pyx_v_state[15]) ^ (__pyx_v_state[3])); (__pyx_v_state[15]) = (((__pyx_v_state[15]) << 8) | ((__pyx_v_state[15]) >> 24)); /* "nescient/crypto/chacha.pyx":113 * state[11] = state[11] + state[15]; state[7] = state[7] ^ state[11]; state[7] = (state[7] << 12) | (state[7] >> 20) * state[3] = state[3] + state[7]; state[15] = state[15] ^ state[3]; state[15] = (state[15] << 8) | (state[15] >> 24) * state[11] = state[11] + state[15]; state[7] = state[7] ^ state[11]; state[7] = (state[7] << 7) | (state[7] >> 25) # <<<<<<<<<<<<<< * # Quarter round 0, 5, 10, 15 * state[0] = state[0] + state[5]; state[15] = state[15] ^ state[0]; state[15] = (state[15] << 16) | (state[15] >> 16) */ (__pyx_v_state[11]) = ((__pyx_v_state[11]) + (__pyx_v_state[15])); (__pyx_v_state[7]) = ((__pyx_v_state[7]) ^ (__pyx_v_state[11])); (__pyx_v_state[7]) = (((__pyx_v_state[7]) << 7) | ((__pyx_v_state[7]) >> 25)); /* "nescient/crypto/chacha.pyx":115 * state[11] = state[11] + state[15]; state[7] = state[7] ^ state[11]; state[7] = (state[7] << 7) | (state[7] >> 25) * # Quarter round 0, 5, 10, 15 * state[0] = state[0] + state[5]; state[15] = state[15] ^ state[0]; state[15] = (state[15] << 16) | (state[15] >> 16) # <<<<<<<<<<<<<< * state[10] = state[10] + state[15]; state[5] = state[5] ^ state[10]; state[5] = (state[5] << 12) | (state[5] >> 20) * state[0] = state[0] + state[5]; state[15] = state[15] ^ state[0]; state[15] = (state[15] << 8) | (state[15] >> 24) */ (__pyx_v_state[0]) = ((__pyx_v_state[0]) + (__pyx_v_state[5])); (__pyx_v_state[15]) = ((__pyx_v_state[15]) ^ (__pyx_v_state[0])); (__pyx_v_state[15]) = (((__pyx_v_state[15]) << 16) | ((__pyx_v_state[15]) >> 16)); /* "nescient/crypto/chacha.pyx":116 * # Quarter round 0, 5, 10, 15 * state[0] = state[0] + state[5]; state[15] = state[15] ^ state[0]; state[15] = (state[15] << 16) | (state[15] >> 16) * state[10] = state[10] + state[15]; state[5] = state[5] ^ state[10]; state[5] = (state[5] << 12) | (state[5] >> 20) # <<<<<<<<<<<<<< * state[0] = state[0] + state[5]; state[15] = state[15] ^ state[0]; state[15] = (state[15] << 8) | (state[15] >> 24) * state[10] = state[10] + state[15]; state[5] = state[5] ^ state[10]; state[5] = (state[5] << 7) | (state[5] >> 25) */ (__pyx_v_state[10]) = ((__pyx_v_state[10]) + (__pyx_v_state[15])); (__pyx_v_state[5]) = ((__pyx_v_state[5]) ^ (__pyx_v_state[10])); (__pyx_v_state[5]) = (((__pyx_v_state[5]) << 12) | ((__pyx_v_state[5]) >> 20)); /* "nescient/crypto/chacha.pyx":117 * state[0] = state[0] + state[5]; state[15] = state[15] ^ state[0]; state[15] = (state[15] << 16) | (state[15] >> 16) * state[10] = state[10] + state[15]; state[5] = state[5] ^ state[10]; state[5] = (state[5] << 12) | (state[5] >> 20) * state[0] = state[0] + state[5]; state[15] = state[15] ^ state[0]; state[15] = (state[15] << 8) | (state[15] >> 24) # <<<<<<<<<<<<<< * state[10] = 
state[10] + state[15]; state[5] = state[5] ^ state[10]; state[5] = (state[5] << 7) | (state[5] >> 25) * # Quarter round 1, 6, 11, 12 */ (__pyx_v_state[0]) = ((__pyx_v_state[0]) + (__pyx_v_state[5])); (__pyx_v_state[15]) = ((__pyx_v_state[15]) ^ (__pyx_v_state[0])); (__pyx_v_state[15]) = (((__pyx_v_state[15]) << 8) | ((__pyx_v_state[15]) >> 24)); /* "nescient/crypto/chacha.pyx":118 * state[10] = state[10] + state[15]; state[5] = state[5] ^ state[10]; state[5] = (state[5] << 12) | (state[5] >> 20) * state[0] = state[0] + state[5]; state[15] = state[15] ^ state[0]; state[15] = (state[15] << 8) | (state[15] >> 24) * state[10] = state[10] + state[15]; state[5] = state[5] ^ state[10]; state[5] = (state[5] << 7) | (state[5] >> 25) # <<<<<<<<<<<<<< * # Quarter round 1, 6, 11, 12 * state[1] = state[1] + state[6]; state[12] = state[12] ^ state[1]; state[12] = (state[12] << 16) | (state[12] >> 16) */ (__pyx_v_state[10]) = ((__pyx_v_state[10]) + (__pyx_v_state[15])); (__pyx_v_state[5]) = ((__pyx_v_state[5]) ^ (__pyx_v_state[10])); (__pyx_v_state[5]) = (((__pyx_v_state[5]) << 7) | ((__pyx_v_state[5]) >> 25)); /* "nescient/crypto/chacha.pyx":120 * state[10] = state[10] + state[15]; state[5] = state[5] ^ state[10]; state[5] = (state[5] << 7) | (state[5] >> 25) * # Quarter round 1, 6, 11, 12 * state[1] = state[1] + state[6]; state[12] = state[12] ^ state[1]; state[12] = (state[12] << 16) | (state[12] >> 16) # <<<<<<<<<<<<<< * state[11] = state[11] + state[12]; state[6] = state[6] ^ state[11]; state[6] = (state[6] << 12) | (state[6] >> 20) * state[1] = state[1] + state[6]; state[12] = state[12] ^ state[1]; state[12] = (state[12] << 8) | (state[12] >> 24) */ (__pyx_v_state[1]) = ((__pyx_v_state[1]) + (__pyx_v_state[6])); (__pyx_v_state[12]) = ((__pyx_v_state[12]) ^ (__pyx_v_state[1])); (__pyx_v_state[12]) = (((__pyx_v_state[12]) << 16) | ((__pyx_v_state[12]) >> 16)); /* "nescient/crypto/chacha.pyx":121 * # Quarter round 1, 6, 11, 12 * state[1] = state[1] + state[6]; state[12] = state[12] ^ state[1]; state[12] = (state[12] << 16) | (state[12] >> 16) * state[11] = state[11] + state[12]; state[6] = state[6] ^ state[11]; state[6] = (state[6] << 12) | (state[6] >> 20) # <<<<<<<<<<<<<< * state[1] = state[1] + state[6]; state[12] = state[12] ^ state[1]; state[12] = (state[12] << 8) | (state[12] >> 24) * state[11] = state[11] + state[12]; state[6] = state[6] ^ state[11]; state[6] = (state[6] << 7) | (state[6] >> 25) */ (__pyx_v_state[11]) = ((__pyx_v_state[11]) + (__pyx_v_state[12])); (__pyx_v_state[6]) = ((__pyx_v_state[6]) ^ (__pyx_v_state[11])); (__pyx_v_state[6]) = (((__pyx_v_state[6]) << 12) | ((__pyx_v_state[6]) >> 20)); /* "nescient/crypto/chacha.pyx":122 * state[1] = state[1] + state[6]; state[12] = state[12] ^ state[1]; state[12] = (state[12] << 16) | (state[12] >> 16) * state[11] = state[11] + state[12]; state[6] = state[6] ^ state[11]; state[6] = (state[6] << 12) | (state[6] >> 20) * state[1] = state[1] + state[6]; state[12] = state[12] ^ state[1]; state[12] = (state[12] << 8) | (state[12] >> 24) # <<<<<<<<<<<<<< * state[11] = state[11] + state[12]; state[6] = state[6] ^ state[11]; state[6] = (state[6] << 7) | (state[6] >> 25) * # Quarter round 2, 7, 8, 13 */ (__pyx_v_state[1]) = ((__pyx_v_state[1]) + (__pyx_v_state[6])); (__pyx_v_state[12]) = ((__pyx_v_state[12]) ^ (__pyx_v_state[1])); (__pyx_v_state[12]) = (((__pyx_v_state[12]) << 8) | ((__pyx_v_state[12]) >> 24)); /* "nescient/crypto/chacha.pyx":123 * state[11] = state[11] + state[12]; state[6] = state[6] ^ state[11]; state[6] = (state[6] << 12) | 
(state[6] >> 20) * state[1] = state[1] + state[6]; state[12] = state[12] ^ state[1]; state[12] = (state[12] << 8) | (state[12] >> 24) * state[11] = state[11] + state[12]; state[6] = state[6] ^ state[11]; state[6] = (state[6] << 7) | (state[6] >> 25) # <<<<<<<<<<<<<< * # Quarter round 2, 7, 8, 13 * state[2] = state[2] + state[7]; state[13] = state[13] ^ state[2]; state[13] = (state[13] << 16) | (state[13] >> 16) */ (__pyx_v_state[11]) = ((__pyx_v_state[11]) + (__pyx_v_state[12])); (__pyx_v_state[6]) = ((__pyx_v_state[6]) ^ (__pyx_v_state[11])); (__pyx_v_state[6]) = (((__pyx_v_state[6]) << 7) | ((__pyx_v_state[6]) >> 25)); /* "nescient/crypto/chacha.pyx":125 * state[11] = state[11] + state[12]; state[6] = state[6] ^ state[11]; state[6] = (state[6] << 7) | (state[6] >> 25) * # Quarter round 2, 7, 8, 13 * state[2] = state[2] + state[7]; state[13] = state[13] ^ state[2]; state[13] = (state[13] << 16) | (state[13] >> 16) # <<<<<<<<<<<<<< * state[8] = state[8] + state[13]; state[7] = state[7] ^ state[8]; state[7] = (state[7] << 12) | (state[7] >> 20) * state[2] = state[2] + state[7]; state[13] = state[13] ^ state[2]; state[13] = (state[13] << 8) | (state[13] >> 24) */ (__pyx_v_state[2]) = ((__pyx_v_state[2]) + (__pyx_v_state[7])); (__pyx_v_state[13]) = ((__pyx_v_state[13]) ^ (__pyx_v_state[2])); (__pyx_v_state[13]) = (((__pyx_v_state[13]) << 16) | ((__pyx_v_state[13]) >> 16)); /* "nescient/crypto/chacha.pyx":126 * # Quarter round 2, 7, 8, 13 * state[2] = state[2] + state[7]; state[13] = state[13] ^ state[2]; state[13] = (state[13] << 16) | (state[13] >> 16) * state[8] = state[8] + state[13]; state[7] = state[7] ^ state[8]; state[7] = (state[7] << 12) | (state[7] >> 20) # <<<<<<<<<<<<<< * state[2] = state[2] + state[7]; state[13] = state[13] ^ state[2]; state[13] = (state[13] << 8) | (state[13] >> 24) * state[8] = state[8] + state[13]; state[7] = state[7] ^ state[8]; state[7] = (state[7] << 7) | (state[7] >> 25) */ (__pyx_v_state[8]) = ((__pyx_v_state[8]) + (__pyx_v_state[13])); (__pyx_v_state[7]) = ((__pyx_v_state[7]) ^ (__pyx_v_state[8])); (__pyx_v_state[7]) = (((__pyx_v_state[7]) << 12) | ((__pyx_v_state[7]) >> 20)); /* "nescient/crypto/chacha.pyx":127 * state[2] = state[2] + state[7]; state[13] = state[13] ^ state[2]; state[13] = (state[13] << 16) | (state[13] >> 16) * state[8] = state[8] + state[13]; state[7] = state[7] ^ state[8]; state[7] = (state[7] << 12) | (state[7] >> 20) * state[2] = state[2] + state[7]; state[13] = state[13] ^ state[2]; state[13] = (state[13] << 8) | (state[13] >> 24) # <<<<<<<<<<<<<< * state[8] = state[8] + state[13]; state[7] = state[7] ^ state[8]; state[7] = (state[7] << 7) | (state[7] >> 25) * # Quarter round 3, 4, 9, 14 */ (__pyx_v_state[2]) = ((__pyx_v_state[2]) + (__pyx_v_state[7])); (__pyx_v_state[13]) = ((__pyx_v_state[13]) ^ (__pyx_v_state[2])); (__pyx_v_state[13]) = (((__pyx_v_state[13]) << 8) | ((__pyx_v_state[13]) >> 24)); /* "nescient/crypto/chacha.pyx":128 * state[8] = state[8] + state[13]; state[7] = state[7] ^ state[8]; state[7] = (state[7] << 12) | (state[7] >> 20) * state[2] = state[2] + state[7]; state[13] = state[13] ^ state[2]; state[13] = (state[13] << 8) | (state[13] >> 24) * state[8] = state[8] + state[13]; state[7] = state[7] ^ state[8]; state[7] = (state[7] << 7) | (state[7] >> 25) # <<<<<<<<<<<<<< * # Quarter round 3, 4, 9, 14 * state[3] = state[3] + state[4]; state[14] = state[14] ^ state[3]; state[14] = (state[14] << 16) | (state[14] >> 16) */ (__pyx_v_state[8]) = ((__pyx_v_state[8]) + (__pyx_v_state[13])); (__pyx_v_state[7]) = 
((__pyx_v_state[7]) ^ (__pyx_v_state[8])); (__pyx_v_state[7]) = (((__pyx_v_state[7]) << 7) | ((__pyx_v_state[7]) >> 25)); /* "nescient/crypto/chacha.pyx":130 * state[8] = state[8] + state[13]; state[7] = state[7] ^ state[8]; state[7] = (state[7] << 7) | (state[7] >> 25) * # Quarter round 3, 4, 9, 14 * state[3] = state[3] + state[4]; state[14] = state[14] ^ state[3]; state[14] = (state[14] << 16) | (state[14] >> 16) # <<<<<<<<<<<<<< * state[9] = state[9] + state[14]; state[4] = state[4] ^ state[9]; state[4] = (state[4] << 12) | (state[4] >> 20) * state[3] = state[3] + state[4]; state[14] = state[14] ^ state[3]; state[14] = (state[14] << 8) | (state[14] >> 24) */ (__pyx_v_state[3]) = ((__pyx_v_state[3]) + (__pyx_v_state[4])); (__pyx_v_state[14]) = ((__pyx_v_state[14]) ^ (__pyx_v_state[3])); (__pyx_v_state[14]) = (((__pyx_v_state[14]) << 16) | ((__pyx_v_state[14]) >> 16)); /* "nescient/crypto/chacha.pyx":131 * # Quarter round 3, 4, 9, 14 * state[3] = state[3] + state[4]; state[14] = state[14] ^ state[3]; state[14] = (state[14] << 16) | (state[14] >> 16) * state[9] = state[9] + state[14]; state[4] = state[4] ^ state[9]; state[4] = (state[4] << 12) | (state[4] >> 20) # <<<<<<<<<<<<<< * state[3] = state[3] + state[4]; state[14] = state[14] ^ state[3]; state[14] = (state[14] << 8) | (state[14] >> 24) * state[9] = state[9] + state[14]; state[4] = state[4] ^ state[9]; state[4] = (state[4] << 7) | (state[4] >> 25) */ (__pyx_v_state[9]) = ((__pyx_v_state[9]) + (__pyx_v_state[14])); (__pyx_v_state[4]) = ((__pyx_v_state[4]) ^ (__pyx_v_state[9])); (__pyx_v_state[4]) = (((__pyx_v_state[4]) << 12) | ((__pyx_v_state[4]) >> 20)); /* "nescient/crypto/chacha.pyx":132 * state[3] = state[3] + state[4]; state[14] = state[14] ^ state[3]; state[14] = (state[14] << 16) | (state[14] >> 16) * state[9] = state[9] + state[14]; state[4] = state[4] ^ state[9]; state[4] = (state[4] << 12) | (state[4] >> 20) * state[3] = state[3] + state[4]; state[14] = state[14] ^ state[3]; state[14] = (state[14] << 8) | (state[14] >> 24) # <<<<<<<<<<<<<< * state[9] = state[9] + state[14]; state[4] = state[4] ^ state[9]; state[4] = (state[4] << 7) | (state[4] >> 25) * # Add the original state with the result */ (__pyx_v_state[3]) = ((__pyx_v_state[3]) + (__pyx_v_state[4])); (__pyx_v_state[14]) = ((__pyx_v_state[14]) ^ (__pyx_v_state[3])); (__pyx_v_state[14]) = (((__pyx_v_state[14]) << 8) | ((__pyx_v_state[14]) >> 24)); /* "nescient/crypto/chacha.pyx":133 * state[9] = state[9] + state[14]; state[4] = state[4] ^ state[9]; state[4] = (state[4] << 12) | (state[4] >> 20) * state[3] = state[3] + state[4]; state[14] = state[14] ^ state[3]; state[14] = (state[14] << 8) | (state[14] >> 24) * state[9] = state[9] + state[14]; state[4] = state[4] ^ state[9]; state[4] = (state[4] << 7) | (state[4] >> 25) # <<<<<<<<<<<<<< * # Add the original state with the result * for i in range(16): */ (__pyx_v_state[9]) = ((__pyx_v_state[9]) + (__pyx_v_state[14])); (__pyx_v_state[4]) = ((__pyx_v_state[4]) ^ (__pyx_v_state[9])); (__pyx_v_state[4]) = (((__pyx_v_state[4]) << 7) | ((__pyx_v_state[4]) >> 25)); } /* "nescient/crypto/chacha.pyx":135 * state[9] = state[9] + state[14]; state[4] = state[4] ^ state[9]; state[4] = (state[4] << 7) | (state[4] >> 25) * # Add the original state with the result * for i in range(16): # <<<<<<<<<<<<<< * state[i] += start_state[i] * # Cast to bytes, and byteswap if necessary */ for (__pyx_t_1 = 0; __pyx_t_1 < 16; __pyx_t_1+=1) { __pyx_v_i = __pyx_t_1; /* "nescient/crypto/chacha.pyx":136 * # Add the original state with the result * 
for i in range(16): * state[i] += start_state[i] # <<<<<<<<<<<<<< * # Cast to bytes, and byteswap if necessary * cdef uint8_t * b = <uint8_t *>(state) */ __pyx_t_2 = __pyx_v_i; (__pyx_v_state[__pyx_t_2]) = ((__pyx_v_state[__pyx_t_2]) + (__pyx_v_start_state[__pyx_v_i])); } /* "nescient/crypto/chacha.pyx":138 * state[i] += start_state[i] * # Cast to bytes, and byteswap if necessary * cdef uint8_t * b = <uint8_t *>(state) # <<<<<<<<<<<<<< * for i in range(64): * key_stream[i] = b[i] */ __pyx_v_b = ((uint8_t *)__pyx_v_state); /* "nescient/crypto/chacha.pyx":139 * # Cast to bytes, and byteswap if necessary * cdef uint8_t * b = <uint8_t *>(state) * for i in range(64): # <<<<<<<<<<<<<< * key_stream[i] = b[i] * if big_endian: */ for (__pyx_t_1 = 0; __pyx_t_1 < 64; __pyx_t_1+=1) { __pyx_v_i = __pyx_t_1; /* "nescient/crypto/chacha.pyx":140 * cdef uint8_t * b = <uint8_t *>(state) * for i in range(64): * key_stream[i] = b[i] # <<<<<<<<<<<<<< * if big_endian: * byte_swap(key_stream, 64) */ (__pyx_v_key_stream[__pyx_v_i]) = (__pyx_v_b[__pyx_v_i]); } /* "nescient/crypto/chacha.pyx":141 * for i in range(64): * key_stream[i] = b[i] * if big_endian: # <<<<<<<<<<<<<< * byte_swap(key_stream, 64) * return */ __pyx_t_3 = (__pyx_v_8nescient_6crypto_6chacha_big_endian != 0); if (__pyx_t_3) { /* "nescient/crypto/chacha.pyx":142 * key_stream[i] = b[i] * if big_endian: * byte_swap(key_stream, 64) # <<<<<<<<<<<<<< * return * */ __pyx_f_8nescient_6crypto_6chacha_byte_swap(__pyx_v_key_stream, 64); /* "nescient/crypto/chacha.pyx":141 * for i in range(64): * key_stream[i] = b[i] * if big_endian: # <<<<<<<<<<<<<< * byte_swap(key_stream, 64) * return */ } /* "nescient/crypto/chacha.pyx":143 * if big_endian: * byte_swap(key_stream, 64) * return # <<<<<<<<<<<<<< * * cdef void _chacha_task(uint32_t * key_w, uint8_t * data, uint32_t * nonce_w, uint32_t count, */ goto __pyx_L0; /* "nescient/crypto/chacha.pyx":76 * * # Generates 64 keystream bytes from a 256-bit key, a 96-bit nonce, and a 32-bit counter * cdef void chacha20(uint8_t * key_stream, uint32_t * key, uint32_t * nonce, uint32_t count) nogil: # <<<<<<<<<<<<<< * cdef uint32_t state[16] * cdef uint32_t start_state[16] */ /* function exit code */ __pyx_L0:; } /* "nescient/crypto/chacha.pyx":145 * return * * cdef void _chacha_task(uint32_t * key_w, uint8_t * data, uint32_t * nonce_w, uint32_t count, # <<<<<<<<<<<<<< * uint64_t l) nogil: * # Initialize counter and working variables */ static void __pyx_f_8nescient_6crypto_6chacha__chacha_task(uint32_t *__pyx_v_key_w, uint8_t *__pyx_v_data, uint32_t *__pyx_v_nonce_w, uint32_t __pyx_v_count, uint64_t __pyx_v_l) { uint32_t __pyx_v_counter; uint32_t __pyx_v_n_blocks; uint8_t *__pyx_v_key_stream; uint8_t *__pyx_v_buffer; uint32_t __pyx_v_i; uint8_t __pyx_v_j; uint32_t __pyx_t_1; uint32_t __pyx_t_2; uint32_t __pyx_t_3; uint8_t __pyx_t_4; uint8_t __pyx_t_5; int __pyx_t_6; uint64_t __pyx_t_7; uint64_t __pyx_t_8; /* "nescient/crypto/chacha.pyx":148 * uint64_t l) nogil: * # Initialize counter and working variables * cdef uint32_t counter = count # <<<<<<<<<<<<<< * cdef uint32_t n_blocks = <uint32_t>(l//64) * cdef uint8_t * key_stream = <uint8_t *>malloc(64) */ __pyx_v_counter = __pyx_v_count; /* "nescient/crypto/chacha.pyx":149 * # Initialize counter and working variables * cdef uint32_t counter = count * cdef uint32_t n_blocks = <uint32_t>(l//64) # <<<<<<<<<<<<<< * cdef uint8_t * key_stream = <uint8_t *>malloc(64) * cdef uint8_t * buffer = data */ __pyx_v_n_blocks = ((uint32_t)(__pyx_v_l / 64)); /* "nescient/crypto/chacha.pyx":150 * 
cdef uint32_t counter = count * cdef uint32_t n_blocks = <uint32_t>(l//64) * cdef uint8_t * key_stream = <uint8_t *>malloc(64) # <<<<<<<<<<<<<< * cdef uint8_t * buffer = data * cdef uint32_t i */ __pyx_v_key_stream = ((uint8_t *)malloc(64)); /* "nescient/crypto/chacha.pyx":151 * cdef uint32_t n_blocks = <uint32_t>(l//64) * cdef uint8_t * key_stream = <uint8_t *>malloc(64) * cdef uint8_t * buffer = data # <<<<<<<<<<<<<< * cdef uint32_t i * cdef uint8_t j */ __pyx_v_buffer = __pyx_v_data; /* "nescient/crypto/chacha.pyx":154 * cdef uint32_t i * cdef uint8_t j * for i in range(n_blocks): # <<<<<<<<<<<<<< * chacha20(key_stream, key_w, nonce_w, counter+i) * for j in range(64): */ __pyx_t_1 = __pyx_v_n_blocks; __pyx_t_2 = __pyx_t_1; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "nescient/crypto/chacha.pyx":155 * cdef uint8_t j * for i in range(n_blocks): * chacha20(key_stream, key_w, nonce_w, counter+i) # <<<<<<<<<<<<<< * for j in range(64): * buffer[j] ^= key_stream[j] */ __pyx_f_8nescient_6crypto_6chacha_chacha20(__pyx_v_key_stream, __pyx_v_key_w, __pyx_v_nonce_w, (__pyx_v_counter + __pyx_v_i)); /* "nescient/crypto/chacha.pyx":156 * for i in range(n_blocks): * chacha20(key_stream, key_w, nonce_w, counter+i) * for j in range(64): # <<<<<<<<<<<<<< * buffer[j] ^= key_stream[j] * buffer += 64 */ for (__pyx_t_4 = 0; __pyx_t_4 < 64; __pyx_t_4+=1) { __pyx_v_j = __pyx_t_4; /* "nescient/crypto/chacha.pyx":157 * chacha20(key_stream, key_w, nonce_w, counter+i) * for j in range(64): * buffer[j] ^= key_stream[j] # <<<<<<<<<<<<<< * buffer += 64 * i = n_blocks */ __pyx_t_5 = __pyx_v_j; (__pyx_v_buffer[__pyx_t_5]) = ((__pyx_v_buffer[__pyx_t_5]) ^ (__pyx_v_key_stream[__pyx_v_j])); } /* "nescient/crypto/chacha.pyx":158 * for j in range(64): * buffer[j] ^= key_stream[j] * buffer += 64 # <<<<<<<<<<<<<< * i = n_blocks * if l % 64 != 0: */ __pyx_v_buffer = (__pyx_v_buffer + 64); } /* "nescient/crypto/chacha.pyx":159 * buffer[j] ^= key_stream[j] * buffer += 64 * i = n_blocks # <<<<<<<<<<<<<< * if l % 64 != 0: * chacha20(key_stream, key_w, nonce_w, counter+i) */ __pyx_v_i = __pyx_v_n_blocks; /* "nescient/crypto/chacha.pyx":160 * buffer += 64 * i = n_blocks * if l % 64 != 0: # <<<<<<<<<<<<<< * chacha20(key_stream, key_w, nonce_w, counter+i) * for j in range(l % 64): */ __pyx_t_6 = (((__pyx_v_l % 64) != 0) != 0); if (__pyx_t_6) { /* "nescient/crypto/chacha.pyx":161 * i = n_blocks * if l % 64 != 0: * chacha20(key_stream, key_w, nonce_w, counter+i) # <<<<<<<<<<<<<< * for j in range(l % 64): * buffer[j] ^= key_stream[j] */ __pyx_f_8nescient_6crypto_6chacha_chacha20(__pyx_v_key_stream, __pyx_v_key_w, __pyx_v_nonce_w, (__pyx_v_counter + __pyx_v_i)); /* "nescient/crypto/chacha.pyx":162 * if l % 64 != 0: * chacha20(key_stream, key_w, nonce_w, counter+i) * for j in range(l % 64): # <<<<<<<<<<<<<< * buffer[j] ^= key_stream[j] * free(key_stream) */ __pyx_t_7 = (__pyx_v_l % 64); __pyx_t_8 = __pyx_t_7; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_8; __pyx_t_4+=1) { __pyx_v_j = __pyx_t_4; /* "nescient/crypto/chacha.pyx":163 * chacha20(key_stream, key_w, nonce_w, counter+i) * for j in range(l % 64): * buffer[j] ^= key_stream[j] # <<<<<<<<<<<<<< * free(key_stream) * return */ __pyx_t_5 = __pyx_v_j; (__pyx_v_buffer[__pyx_t_5]) = ((__pyx_v_buffer[__pyx_t_5]) ^ (__pyx_v_key_stream[__pyx_v_j])); } /* "nescient/crypto/chacha.pyx":160 * buffer += 64 * i = n_blocks * if l % 64 != 0: # <<<<<<<<<<<<<< * chacha20(key_stream, key_w, nonce_w, counter+i) * for j in range(l % 64): */ } /* 
"nescient/crypto/chacha.pyx":164 * for j in range(l % 64): * buffer[j] ^= key_stream[j] * free(key_stream) # <<<<<<<<<<<<<< * return * */ free(__pyx_v_key_stream); /* "nescient/crypto/chacha.pyx":165 * buffer[j] ^= key_stream[j] * free(key_stream) * return # <<<<<<<<<<<<<< * * #foo */ goto __pyx_L0; /* "nescient/crypto/chacha.pyx":145 * return * * cdef void _chacha_task(uint32_t * key_w, uint8_t * data, uint32_t * nonce_w, uint32_t count, # <<<<<<<<<<<<<< * uint64_t l) nogil: * # Initialize counter and working variables */ /* function exit code */ __pyx_L0:; } /* "nescient/crypto/chacha.pyx":181 * auth = ['sha'] * * def __init__(self, key): # <<<<<<<<<<<<<< * assert len(key) == 32 * self.key = key[:] */ /* Python wrapper */ static PyObject *__pyx_pw_8nescient_6crypto_6chacha_13ChaChaCrypter_1__init__(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_8nescient_6crypto_6chacha_13ChaChaCrypter_1__init__ = {"__init__", (PyCFunction)__pyx_pw_8nescient_6crypto_6chacha_13ChaChaCrypter_1__init__, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_8nescient_6crypto_6chacha_13ChaChaCrypter_1__init__(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_self = 0; PyObject *__pyx_v_key = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__init__ (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_key,0}; PyObject* values[2] = {0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_key)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__init__", 1, 2, 2, 1); __PYX_ERR(0, 181, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__init__") < 0)) __PYX_ERR(0, 181, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); } __pyx_v_self = values[0]; __pyx_v_key = values[1]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__init__", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 181, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("nescient.crypto.chacha.ChaChaCrypter.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_8nescient_6crypto_6chacha_13ChaChaCrypter___init__(__pyx_self, __pyx_v_self, __pyx_v_key); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_8nescient_6crypto_6chacha_13ChaChaCrypter___init__(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_key) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("__init__", 0); /* 
"nescient/crypto/chacha.pyx":182 * * def __init__(self, key): * assert len(key) == 32 # <<<<<<<<<<<<<< * self.key = key[:] * # Since this is a stream cipher encryption is the same as decryption */ #ifndef CYTHON_WITHOUT_ASSERTIONS if (unlikely(!Py_OptimizeFlag)) { __pyx_t_1 = PyObject_Length(__pyx_v_key); if (unlikely(__pyx_t_1 == ((Py_ssize_t)-1))) __PYX_ERR(0, 182, __pyx_L1_error) if (unlikely(!((__pyx_t_1 == 32) != 0))) { PyErr_SetNone(PyExc_AssertionError); __PYX_ERR(0, 182, __pyx_L1_error) } } #endif /* "nescient/crypto/chacha.pyx":183 * def __init__(self, key): * assert len(key) == 32 * self.key = key[:] # <<<<<<<<<<<<<< * # Since this is a stream cipher encryption is the same as decryption * self.chacha_decrypt = self.chacha_encrypt */ __pyx_t_2 = __Pyx_PyObject_GetSlice(__pyx_v_key, 0, 0, NULL, NULL, &__pyx_slice_, 0, 0, 1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 183, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (__Pyx_PyObject_SetAttrStr(__pyx_v_self, __pyx_n_s_key, __pyx_t_2) < 0) __PYX_ERR(0, 183, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "nescient/crypto/chacha.pyx":185 * self.key = key[:] * # Since this is a stream cipher encryption is the same as decryption * self.chacha_decrypt = self.chacha_encrypt # <<<<<<<<<<<<<< * * # The function that actually performs ChaCha encryption/decryption */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_chacha_encrypt); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 185, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (__Pyx_PyObject_SetAttrStr(__pyx_v_self, __pyx_n_s_chacha_decrypt, __pyx_t_2) < 0) __PYX_ERR(0, 185, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "nescient/crypto/chacha.pyx":181 * auth = ['sha'] * * def __init__(self, key): # <<<<<<<<<<<<<< * assert len(key) == 32 * self.key = key[:] */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("nescient.crypto.chacha.ChaChaCrypter.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "nescient/crypto/chacha.pyx":188 * * # The function that actually performs ChaCha encryption/decryption * def _chacha_task(self, data, nonce, count=1): # <<<<<<<<<<<<<< * # Convert key from bytes to little-endian words * cdef uint32_t * key_w = bytes_to_words(self.key, 32) */ /* Python wrapper */ static PyObject *__pyx_pw_8nescient_6crypto_6chacha_13ChaChaCrypter_3_chacha_task(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_8nescient_6crypto_6chacha_13ChaChaCrypter_3_chacha_task = {"_chacha_task", (PyCFunction)__pyx_pw_8nescient_6crypto_6chacha_13ChaChaCrypter_3_chacha_task, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_8nescient_6crypto_6chacha_13ChaChaCrypter_3_chacha_task(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_self = 0; PyObject *__pyx_v_data = 0; PyObject *__pyx_v_nonce = 0; PyObject *__pyx_v_count = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("_chacha_task (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_data,&__pyx_n_s_nonce,&__pyx_n_s_count,0}; PyObject* values[4] = {0,0,0,0}; values[3] = ((PyObject *)((PyObject *)__pyx_int_1)); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 4: values[3] = 
PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_data)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("_chacha_task", 0, 3, 4, 1); __PYX_ERR(0, 188, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_nonce)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("_chacha_task", 0, 3, 4, 2); __PYX_ERR(0, 188, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_count); if (value) { values[3] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "_chacha_task") < 0)) __PYX_ERR(0, 188, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_self = values[0]; __pyx_v_data = values[1]; __pyx_v_nonce = values[2]; __pyx_v_count = values[3]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("_chacha_task", 0, 3, 4, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 188, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("nescient.crypto.chacha.ChaChaCrypter._chacha_task", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_8nescient_6crypto_6chacha_13ChaChaCrypter_2_chacha_task(__pyx_self, __pyx_v_self, __pyx_v_data, __pyx_v_nonce, __pyx_v_count); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_8nescient_6crypto_6chacha_13ChaChaCrypter_2_chacha_task(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_data, PyObject *__pyx_v_nonce, PyObject *__pyx_v_count) { uint32_t *__pyx_v_key_w; uint32_t *__pyx_v_nonce_w; __Pyx_memviewslice __pyx_v_view = { 0, 0, { 0 }, { 0 }, { 0 } }; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; uint8_t *__pyx_t_2; PyObject *__pyx_t_3 = NULL; __Pyx_memviewslice __pyx_t_4 = { 0, 0, { 0 }, { 0 }, { 0 } }; Py_ssize_t __pyx_t_5; int __pyx_t_6; uint32_t __pyx_t_7; Py_ssize_t __pyx_t_8; __Pyx_RefNannySetupContext("_chacha_task", 0); /* "nescient/crypto/chacha.pyx":190 * def _chacha_task(self, data, nonce, count=1): * # Convert key from bytes to little-endian words * cdef uint32_t * key_w = bytes_to_words(self.key, 32) # <<<<<<<<<<<<<< * # Convert the nonce into an array of 32-bit words * cdef uint32_t * nonce_w = bytes_to_words(nonce.to_bytes(12, 'little'), 12) */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_key); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 190, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_AsWritableUString(__pyx_t_1); if 
(unlikely((!__pyx_t_2) && PyErr_Occurred())) __PYX_ERR(0, 190, __pyx_L1_error) __pyx_v_key_w = __pyx_f_8nescient_6crypto_6chacha_bytes_to_words(__pyx_t_2, 32); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nescient/crypto/chacha.pyx":192 * cdef uint32_t * key_w = bytes_to_words(self.key, 32) * # Convert the nonce into an array of 32-bit words * cdef uint32_t * nonce_w = bytes_to_words(nonce.to_bytes(12, 'little'), 12) # <<<<<<<<<<<<<< * # Create a typed memoryview of data and pass its address * cdef uint8_t[:] view = data */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_nonce, __pyx_n_s_to_bytes); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 192, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_tuple__2, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 192, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_2 = __Pyx_PyObject_AsWritableUString(__pyx_t_3); if (unlikely((!__pyx_t_2) && PyErr_Occurred())) __PYX_ERR(0, 192, __pyx_L1_error) __pyx_v_nonce_w = __pyx_f_8nescient_6crypto_6chacha_bytes_to_words(__pyx_t_2, 12); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "nescient/crypto/chacha.pyx":194 * cdef uint32_t * nonce_w = bytes_to_words(nonce.to_bytes(12, 'little'), 12) * # Create a typed memoryview of data and pass its address * cdef uint8_t[:] view = data # <<<<<<<<<<<<<< * _chacha_task(key_w, &view[0], nonce_w, count, len(data)) * PyMem_Free(key_w) */ __pyx_t_4 = __Pyx_PyObject_to_MemoryviewSlice_ds_nn_uint8_t(__pyx_v_data, PyBUF_WRITABLE); if (unlikely(!__pyx_t_4.memview)) __PYX_ERR(0, 194, __pyx_L1_error) __pyx_v_view = __pyx_t_4; __pyx_t_4.memview = NULL; __pyx_t_4.data = NULL; /* "nescient/crypto/chacha.pyx":195 * # Create a typed memoryview of data and pass its address * cdef uint8_t[:] view = data * _chacha_task(key_w, &view[0], nonce_w, count, len(data)) # <<<<<<<<<<<<<< * PyMem_Free(key_w) * PyMem_Free(nonce_w) */ __pyx_t_5 = 0; __pyx_t_6 = -1; if (__pyx_t_5 < 0) { __pyx_t_5 += __pyx_v_view.shape[0]; if (unlikely(__pyx_t_5 < 0)) __pyx_t_6 = 0; } else if (unlikely(__pyx_t_5 >= __pyx_v_view.shape[0])) __pyx_t_6 = 0; if (unlikely(__pyx_t_6 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_6); __PYX_ERR(0, 195, __pyx_L1_error) } __pyx_t_7 = __Pyx_PyInt_As_uint32_t(__pyx_v_count); if (unlikely((__pyx_t_7 == ((uint32_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 195, __pyx_L1_error) __pyx_t_8 = PyObject_Length(__pyx_v_data); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 195, __pyx_L1_error) __pyx_f_8nescient_6crypto_6chacha__chacha_task(__pyx_v_key_w, (&(*((uint8_t *) ( /* dim=0 */ (__pyx_v_view.data + __pyx_t_5 * __pyx_v_view.strides[0]) )))), __pyx_v_nonce_w, __pyx_t_7, __pyx_t_8); /* "nescient/crypto/chacha.pyx":196 * cdef uint8_t[:] view = data * _chacha_task(key_w, &view[0], nonce_w, count, len(data)) * PyMem_Free(key_w) # <<<<<<<<<<<<<< * PyMem_Free(nonce_w) * */ PyMem_Free(__pyx_v_key_w); /* "nescient/crypto/chacha.pyx":197 * _chacha_task(key_w, &view[0], nonce_w, count, len(data)) * PyMem_Free(key_w) * PyMem_Free(nonce_w) # <<<<<<<<<<<<<< * * def chacha_encrypt(self, data, nonce=None, count=1, force_single_thread=False): */ PyMem_Free(__pyx_v_nonce_w); /* "nescient/crypto/chacha.pyx":188 * * # The function that actually performs ChaCha encryption/decryption * def _chacha_task(self, data, nonce, count=1): # <<<<<<<<<<<<<< * # Convert key from bytes to little-endian words * cdef uint32_t * key_w = bytes_to_words(self.key, 32) */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; 
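/* Both bytes_to_words() calls above pack raw bytes into little-endian 32-bit
 * words (8 words for the 32-byte key, 3 for the 12-byte nonce), the layout the
 * ChaCha20 state expects per RFC 7539. A minimal sketch of that packing,
 * assuming the allocation contract of the real helper defined earlier in this
 * file; excluded from compilation and shown for illustration only:
 */
#if 0
static uint32_t *sketch_bytes_to_words(const uint8_t *b, size_t n_bytes) {
    /* n_bytes is assumed to be a multiple of 4 (32 for the key, 12 for the nonce) */
    uint32_t *w = (uint32_t *) PyMem_Malloc((n_bytes / 4) * sizeof(uint32_t));
    for (size_t i = 0; w && i < n_bytes / 4; i++)
        w[i] = (uint32_t) b[4 * i]               /* least significant byte first */
             | ((uint32_t) b[4 * i + 1] << 8)
             | ((uint32_t) b[4 * i + 2] << 16)
             | ((uint32_t) b[4 * i + 3] << 24);
    return w;  /* the callers above release the result with PyMem_Free */
}
#endif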
__pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_3); __PYX_XDEC_MEMVIEW(&__pyx_t_4, 1); __Pyx_AddTraceback("nescient.crypto.chacha.ChaChaCrypter._chacha_task", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L0:; __PYX_XDEC_MEMVIEW(&__pyx_v_view, 1); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "nescient/crypto/chacha.pyx":199 * PyMem_Free(nonce_w) * * def chacha_encrypt(self, data, nonce=None, count=1, force_single_thread=False): # <<<<<<<<<<<<<< * """ Encrypt (or decrypt) in-memory data using ChaCha20. * */ /* Python wrapper */ static PyObject *__pyx_pw_8nescient_6crypto_6chacha_13ChaChaCrypter_5chacha_encrypt(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_8nescient_6crypto_6chacha_13ChaChaCrypter_4chacha_encrypt[] = " Encrypt (or decrypt) in-memory data using ChaCha20.\n\n Because the count argument is limited to 32 bits, the most data that can be encrypted at once with this function\n is 256 GiB.\n\n Since this is a stream cipher, encryption is the same as decryption.\n\n Args:\n data: Must be either a `bytearray` or some array that is byte-addressable and supports the buffer protocol.\n (byte `RawArray`s are acceptable arguments as well.)\n nonce (int): If provided, the 96-bit integer to use as a nonce for this operation. If not provided, a\n random nonce will be generated.\n count (int): The 32-bit counter at which to start the key stream.\n force_single_thread (bool): If `True`, this operation will always run in a single thread.\n\n Returns:\n int: The nonce used in this operation.\n "; static PyMethodDef __pyx_mdef_8nescient_6crypto_6chacha_13ChaChaCrypter_5chacha_encrypt = {"chacha_encrypt", (PyCFunction)__pyx_pw_8nescient_6crypto_6chacha_13ChaChaCrypter_5chacha_encrypt, METH_VARARGS|METH_KEYWORDS, __pyx_doc_8nescient_6crypto_6chacha_13ChaChaCrypter_4chacha_encrypt}; static PyObject *__pyx_pw_8nescient_6crypto_6chacha_13ChaChaCrypter_5chacha_encrypt(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_self = 0; PyObject *__pyx_v_data = 0; PyObject *__pyx_v_nonce = 0; PyObject *__pyx_v_count = 0; PyObject *__pyx_v_force_single_thread = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("chacha_encrypt (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_data,&__pyx_n_s_nonce,&__pyx_n_s_count,&__pyx_n_s_force_single_thread,0}; PyObject* values[5] = {0,0,0,0,0}; values[2] = ((PyObject *)((PyObject *)Py_None)); values[3] = ((PyObject *)((PyObject *)__pyx_int_1)); values[4] = ((PyObject *)((PyObject *)Py_False)); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); CYTHON_FALLTHROUGH; case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_data)) != 0)) kw_args--; else {
__Pyx_RaiseArgtupleInvalid("chacha_encrypt", 0, 2, 5, 1); __PYX_ERR(0, 199, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_nonce); if (value) { values[2] = value; kw_args--; } } CYTHON_FALLTHROUGH; case 3: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_count); if (value) { values[3] = value; kw_args--; } } CYTHON_FALLTHROUGH; case 4: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_force_single_thread); if (value) { values[4] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "chacha_encrypt") < 0)) __PYX_ERR(0, 199, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); CYTHON_FALLTHROUGH; case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_self = values[0]; __pyx_v_data = values[1]; __pyx_v_nonce = values[2]; __pyx_v_count = values[3]; __pyx_v_force_single_thread = values[4]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("chacha_encrypt", 0, 2, 5, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 199, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("nescient.crypto.chacha.ChaChaCrypter.chacha_encrypt", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_8nescient_6crypto_6chacha_13ChaChaCrypter_4chacha_encrypt(__pyx_self, __pyx_v_self, __pyx_v_data, __pyx_v_nonce, __pyx_v_count, __pyx_v_force_single_thread); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_8nescient_6crypto_6chacha_13ChaChaCrypter_4chacha_encrypt(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_data, PyObject *__pyx_v_nonce, PyObject *__pyx_v_count, PyObject *__pyx_v_force_single_thread) { int __pyx_v_n_threads; uint32_t __pyx_v_chunk_size; uint32_t __pyx_v_blocks_per_chunk; int __pyx_v_i; uint64_t __pyx_v_ccount; uint32_t *__pyx_v_key_w; uint32_t *__pyx_v_nonce_w; uint64_t __pyx_v_l; uint8_t *__pyx_v_buffer; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; int __pyx_t_6; Py_ssize_t __pyx_t_7; PyObject *__pyx_t_8 = NULL; uint64_t __pyx_t_9; uint8_t *__pyx_t_10; int __pyx_t_11; int __pyx_t_12; __Pyx_RefNannySetupContext("chacha_encrypt", 0); __Pyx_INCREF(__pyx_v_nonce); /* "nescient/crypto/chacha.pyx":219 * """ * # Generate a random 96-bit nonce if unspecified * if nonce is None: # <<<<<<<<<<<<<< * nonce = randbits(96) * # Determine the number of threads to use based on CPU count */ __pyx_t_1 = (__pyx_v_nonce == Py_None); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "nescient/crypto/chacha.pyx":220 * # Generate a random 96-bit nonce if unspecified * if nonce is None: * nonce = randbits(96) # <<<<<<<<<<<<<< * # Determine the number of threads to use based on CPU count * cdef int n_threads = cpu_count() */ __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_randbits); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 220, __pyx_L1_error) 
__Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_tuple__3, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 220, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF_SET(__pyx_v_nonce, __pyx_t_4); __pyx_t_4 = 0; /* "nescient/crypto/chacha.pyx":219 * """ * # Generate a random 96-bit nonce if unspecified * if nonce is None: # <<<<<<<<<<<<<< * nonce = randbits(96) * # Determine the number of threads to use based on CPU count */ } /* "nescient/crypto/chacha.pyx":222 * nonce = randbits(96) * # Determine the number of threads to use based on CPU count * cdef int n_threads = cpu_count() # <<<<<<<<<<<<<< * # Determine the size of the chunks to use for each thread, and the number of ChaCha blocks per chunk * cdef uint32_t chunk_size = len(data)//n_threads//64*64 */ __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_cpu_count); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 222, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_3); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_3, function); } } if (__pyx_t_5) { __pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 222, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; } else { __pyx_t_4 = __Pyx_PyObject_CallNoArg(__pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 222, __pyx_L1_error) } __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyInt_As_int(__pyx_t_4); if (unlikely((__pyx_t_6 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 222, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_v_n_threads = __pyx_t_6; /* "nescient/crypto/chacha.pyx":224 * cdef int n_threads = cpu_count() * # Determine the size of the chunks to use for each thread, and the number of ChaCha blocks per chunk * cdef uint32_t chunk_size = len(data)//n_threads//64*64 # <<<<<<<<<<<<<< * cdef uint32_t blocks_per_chunk = chunk_size//64 * # If forced to use a single thread, or multiprocessing would be slower than a single process, */ __pyx_t_7 = PyObject_Length(__pyx_v_data); if (unlikely(__pyx_t_7 == ((Py_ssize_t)-1))) __PYX_ERR(0, 224, __pyx_L1_error) if (unlikely(__pyx_v_n_threads == 0)) { PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); __PYX_ERR(0, 224, __pyx_L1_error) } else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((int)-1) > 0)) && unlikely(__pyx_v_n_threads == (int)-1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_t_7))) { PyErr_SetString(PyExc_OverflowError, "value too large to perform division"); __PYX_ERR(0, 224, __pyx_L1_error) } __pyx_v_chunk_size = (__Pyx_div_Py_ssize_t(__Pyx_div_Py_ssize_t(__pyx_t_7, __pyx_v_n_threads), 64) * 64); /* "nescient/crypto/chacha.pyx":225 * # Determine the size of the chunks to use for each thread, and the number of ChaCha blocks per chunk * cdef uint32_t chunk_size = len(data)//n_threads//64*64 * cdef uint32_t blocks_per_chunk = chunk_size//64 # <<<<<<<<<<<<<< * # If forced to use a single thread, or multiprocessing would be slower than a single process, * # run in a single process */ __pyx_v_blocks_per_chunk = __Pyx_div_long(__pyx_v_chunk_size, 64); /* "nescient/crypto/chacha.pyx":229 * # run in a single process * # TODO: 2**20 bytes = 1 MiB is an artificially set breakpoint for performance; change this * if force_single_thread or n_threads == 1 
or len(data) < 2**20 or blocks_per_chunk == 0: # <<<<<<<<<<<<<< * self._chacha_task(data, nonce, count) * return nonce */ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_force_single_thread); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(0, 229, __pyx_L1_error) if (!__pyx_t_1) { } else { __pyx_t_2 = __pyx_t_1; goto __pyx_L5_bool_binop_done; } __pyx_t_1 = ((__pyx_v_n_threads == 1) != 0); if (!__pyx_t_1) { } else { __pyx_t_2 = __pyx_t_1; goto __pyx_L5_bool_binop_done; } __pyx_t_7 = PyObject_Length(__pyx_v_data); if (unlikely(__pyx_t_7 == ((Py_ssize_t)-1))) __PYX_ERR(0, 229, __pyx_L1_error) __pyx_t_1 = ((__pyx_t_7 < 0x100000) != 0); if (!__pyx_t_1) { } else { __pyx_t_2 = __pyx_t_1; goto __pyx_L5_bool_binop_done; } __pyx_t_1 = ((__pyx_v_blocks_per_chunk == 0) != 0); __pyx_t_2 = __pyx_t_1; __pyx_L5_bool_binop_done:; if (__pyx_t_2) { /* "nescient/crypto/chacha.pyx":230 * # TODO: 2**20 bytes = 1 MiB is an artificially set breakpoint for performance; change this * if force_single_thread or n_threads == 1 or len(data) < 2**20 or blocks_per_chunk == 0: * self._chacha_task(data, nonce, count) # <<<<<<<<<<<<<< * return nonce * # Begin Cython multiprocessing using OpenMP */ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_chacha_task); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 230, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = NULL; __pyx_t_6 = 0; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_3))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_3); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_3, function); __pyx_t_6 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_3)) { PyObject *__pyx_temp[4] = {__pyx_t_5, __pyx_v_data, __pyx_v_nonce, __pyx_v_count}; __pyx_t_4 = __Pyx_PyFunction_FastCall(__pyx_t_3, __pyx_temp+1-__pyx_t_6, 3+__pyx_t_6); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 230, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_4); } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_3)) { PyObject *__pyx_temp[4] = {__pyx_t_5, __pyx_v_data, __pyx_v_nonce, __pyx_v_count}; __pyx_t_4 = __Pyx_PyCFunction_FastCall(__pyx_t_3, __pyx_temp+1-__pyx_t_6, 3+__pyx_t_6); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 230, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_4); } else #endif { __pyx_t_8 = PyTuple_New(3+__pyx_t_6); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 230, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); if (__pyx_t_5) { __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_5); __pyx_t_5 = NULL; } __Pyx_INCREF(__pyx_v_data); __Pyx_GIVEREF(__pyx_v_data); PyTuple_SET_ITEM(__pyx_t_8, 0+__pyx_t_6, __pyx_v_data); __Pyx_INCREF(__pyx_v_nonce); __Pyx_GIVEREF(__pyx_v_nonce); PyTuple_SET_ITEM(__pyx_t_8, 1+__pyx_t_6, __pyx_v_nonce); __Pyx_INCREF(__pyx_v_count); __Pyx_GIVEREF(__pyx_v_count); PyTuple_SET_ITEM(__pyx_t_8, 2+__pyx_t_6, __pyx_v_count); __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_8, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 230, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; /* "nescient/crypto/chacha.pyx":231 * if force_single_thread or n_threads == 1 or len(data) < 2**20 or blocks_per_chunk == 0: * self._chacha_task(data, nonce, count) * return nonce # <<<<<<<<<<<<<< * # Begin Cython multiprocessing using OpenMP * cdef int i */ __Pyx_XDECREF(__pyx_r); 
__Pyx_INCREF(__pyx_v_nonce); __pyx_r = __pyx_v_nonce; goto __pyx_L0; /* "nescient/crypto/chacha.pyx":229 * # run in a single process * # TODO: 2**20 bytes = 1 MiB is an artificially set breakpoint for performance; change this * if force_single_thread or n_threads == 1 or len(data) < 2**20 or blocks_per_chunk == 0: # <<<<<<<<<<<<<< * self._chacha_task(data, nonce, count) * return nonce */ } /* "nescient/crypto/chacha.pyx":234 * # Begin Cython multiprocessing using OpenMP * cdef int i * cdef uint64_t ccount = count # <<<<<<<<<<<<<< * # Convert key from bytes to little-endian words * cdef uint32_t * key_w = bytes_to_words(self.key, 32) */ __pyx_t_9 = __Pyx_PyInt_As_uint64_t(__pyx_v_count); if (unlikely((__pyx_t_9 == ((uint64_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 234, __pyx_L1_error) __pyx_v_ccount = __pyx_t_9; /* "nescient/crypto/chacha.pyx":236 * cdef uint64_t ccount = count * # Convert key from bytes to little-endian words * cdef uint32_t * key_w = bytes_to_words(self.key, 32) # <<<<<<<<<<<<<< * # Convert the nonce into an array of 32-bit words * cdef uint32_t * nonce_w = bytes_to_words(nonce.to_bytes(12, 'little'), 12) */ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_key); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 236, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_10 = __Pyx_PyObject_AsWritableUString(__pyx_t_4); if (unlikely((!__pyx_t_10) && PyErr_Occurred())) __PYX_ERR(0, 236, __pyx_L1_error) __pyx_v_key_w = __pyx_f_8nescient_6crypto_6chacha_bytes_to_words(__pyx_t_10, 32); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; /* "nescient/crypto/chacha.pyx":238 * cdef uint32_t * key_w = bytes_to_words(self.key, 32) * # Convert the nonce into an array of 32-bit words * cdef uint32_t * nonce_w = bytes_to_words(nonce.to_bytes(12, 'little'), 12) # <<<<<<<<<<<<<< * cdef uint64_t l = len(data) * cdef uint8_t * buffer = data */ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_nonce, __pyx_n_s_to_bytes); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 238, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_tuple__4, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 238, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_10 = __Pyx_PyObject_AsWritableUString(__pyx_t_3); if (unlikely((!__pyx_t_10) && PyErr_Occurred())) __PYX_ERR(0, 238, __pyx_L1_error) __pyx_v_nonce_w = __pyx_f_8nescient_6crypto_6chacha_bytes_to_words(__pyx_t_10, 12); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "nescient/crypto/chacha.pyx":239 * # Convert the nonce into an array of 32-bit words * cdef uint32_t * nonce_w = bytes_to_words(nonce.to_bytes(12, 'little'), 12) * cdef uint64_t l = len(data) # <<<<<<<<<<<<<< * cdef uint8_t * buffer = data * for i in prange(n_threads, nogil=True): */ __pyx_t_7 = PyObject_Length(__pyx_v_data); if (unlikely(__pyx_t_7 == ((Py_ssize_t)-1))) __PYX_ERR(0, 239, __pyx_L1_error) __pyx_v_l = __pyx_t_7; /* "nescient/crypto/chacha.pyx":240 * cdef uint32_t * nonce_w = bytes_to_words(nonce.to_bytes(12, 'little'), 12) * cdef uint64_t l = len(data) * cdef uint8_t * buffer = data # <<<<<<<<<<<<<< * for i in prange(n_threads, nogil=True): * if i == n_threads-1: */ __pyx_t_10 = __Pyx_PyObject_AsWritableUString(__pyx_v_data); if (unlikely((!__pyx_t_10) && PyErr_Occurred())) __PYX_ERR(0, 240, __pyx_L1_error) __pyx_v_buffer = __pyx_t_10; /* "nescient/crypto/chacha.pyx":241 * cdef uint64_t l = len(data) * cdef uint8_t * buffer = data * for i in prange(n_threads, nogil=True): # <<<<<<<<<<<<<< * if i == n_threads-1: * _chacha_task(key_w, 
buffer+((n_threads-1)*chunk_size), nonce_w, ccount+(blocks_per_chunk*i), */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); #endif /*try:*/ { __pyx_t_6 = __pyx_v_n_threads; if (1 == 0) abort(); { #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_12 = (__pyx_t_6 - 0 + 1 - 1/abs(1)) / 1; if (__pyx_t_12 > 0) { #ifdef _OPENMP #pragma omp parallel private(__pyx_t_2) #endif /* _OPENMP */ { #ifdef _OPENMP #pragma omp for firstprivate(__pyx_v_i) lastprivate(__pyx_v_i) #endif /* _OPENMP */ for (__pyx_t_11 = 0; __pyx_t_11 < __pyx_t_12; __pyx_t_11++){ { __pyx_v_i = (int)(0 + 1 * __pyx_t_11); /* "nescient/crypto/chacha.pyx":242 * cdef uint8_t * buffer = data * for i in prange(n_threads, nogil=True): * if i == n_threads-1: # <<<<<<<<<<<<<< * _chacha_task(key_w, buffer+((n_threads-1)*chunk_size), nonce_w, ccount+(blocks_per_chunk*i), * l-(n_threads-1)*chunk_size) */ __pyx_t_2 = ((__pyx_v_i == (__pyx_v_n_threads - 1)) != 0); if (__pyx_t_2) { /* "nescient/crypto/chacha.pyx":243 * for i in prange(n_threads, nogil=True): * if i == n_threads-1: * _chacha_task(key_w, buffer+((n_threads-1)*chunk_size), nonce_w, ccount+(blocks_per_chunk*i), # <<<<<<<<<<<<<< * l-(n_threads-1)*chunk_size) * else: */ __pyx_f_8nescient_6crypto_6chacha__chacha_task(__pyx_v_key_w, (__pyx_v_buffer + ((__pyx_v_n_threads - 1) * __pyx_v_chunk_size)), __pyx_v_nonce_w, (__pyx_v_ccount + (__pyx_v_blocks_per_chunk * __pyx_v_i)), (__pyx_v_l - ((__pyx_v_n_threads - 1) * __pyx_v_chunk_size))); /* "nescient/crypto/chacha.pyx":242 * cdef uint8_t * buffer = data * for i in prange(n_threads, nogil=True): * if i == n_threads-1: # <<<<<<<<<<<<<< * _chacha_task(key_w, buffer+((n_threads-1)*chunk_size), nonce_w, ccount+(blocks_per_chunk*i), * l-(n_threads-1)*chunk_size) */ goto __pyx_L16; } /* "nescient/crypto/chacha.pyx":246 * l-(n_threads-1)*chunk_size) * else: * _chacha_task(key_w, buffer+(i*chunk_size), nonce_w, ccount+(blocks_per_chunk*i), chunk_size) # <<<<<<<<<<<<<< * PyMem_Free(key_w) * PyMem_Free(nonce_w) */ /*else*/ { __pyx_f_8nescient_6crypto_6chacha__chacha_task(__pyx_v_key_w, (__pyx_v_buffer + (__pyx_v_i * __pyx_v_chunk_size)), __pyx_v_nonce_w, (__pyx_v_ccount + (__pyx_v_blocks_per_chunk * __pyx_v_i)), __pyx_v_chunk_size); } __pyx_L16:; } } } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "nescient/crypto/chacha.pyx":241 * cdef uint64_t l = len(data) * cdef uint8_t * buffer = data * for i in prange(n_threads, nogil=True): # <<<<<<<<<<<<<< * if i == n_threads-1: * _chacha_task(key_w, buffer+((n_threads-1)*chunk_size), nonce_w, ccount+(blocks_per_chunk*i), */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS #endif goto __pyx_L11; } __pyx_L11:; } } /* "nescient/crypto/chacha.pyx":247 * else: * _chacha_task(key_w, buffer+(i*chunk_size), nonce_w, ccount+(blocks_per_chunk*i), chunk_size) * PyMem_Free(key_w) # <<<<<<<<<<<<<< * PyMem_Free(nonce_w) * return nonce */ PyMem_Free(__pyx_v_key_w); /* "nescient/crypto/chacha.pyx":248 * _chacha_task(key_w, buffer+(i*chunk_size), nonce_w, ccount+(blocks_per_chunk*i), chunk_size) * PyMem_Free(key_w) * PyMem_Free(nonce_w) # 
<<<<<<<<<<<<<< * return nonce * */ PyMem_Free(__pyx_v_nonce_w); /* "nescient/crypto/chacha.pyx":249 * PyMem_Free(key_w) * PyMem_Free(nonce_w) * return nonce # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_nonce); __pyx_r = __pyx_v_nonce; goto __pyx_L0; /* "nescient/crypto/chacha.pyx":199 * PyMem_Free(nonce_w) * * def chacha_encrypt(self, data, nonce=None, count=1, force_single_thread=False): # <<<<<<<<<<<<<< * """ Encrypt (or decrypt) in-memory data using ChaCha20. * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("nescient.crypto.chacha.ChaChaCrypter.chacha_encrypt", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_nonce); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":121 * cdef bint dtype_is_object * * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< * mode="c", bint allocate_buffer=True): * */ /* Python wrapper */ static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_shape = 0; Py_ssize_t __pyx_v_itemsize; PyObject *__pyx_v_format = 0; PyObject *__pyx_v_mode = 0; int __pyx_v_allocate_buffer; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_shape,&__pyx_n_s_itemsize,&__pyx_n_s_format,&__pyx_n_s_mode,&__pyx_n_s_allocate_buffer,0}; PyObject* values[5] = {0,0,0,0,0}; values[3] = ((PyObject *)__pyx_n_s_c); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); CYTHON_FALLTHROUGH; case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_shape)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_itemsize)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 1); __PYX_ERR(1, 121, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_format)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 2); __PYX_ERR(1, 121, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_mode); if (value) { values[3] = value; kw_args--; } } CYTHON_FALLTHROUGH; case 4: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_allocate_buffer); if (value) { values[4] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) __PYX_ERR(1, 121, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 5: values[4] = 
PyTuple_GET_ITEM(__pyx_args, 4); CYTHON_FALLTHROUGH; case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_shape = ((PyObject*)values[0]); __pyx_v_itemsize = __Pyx_PyIndex_AsSsize_t(values[1]); if (unlikely((__pyx_v_itemsize == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 121, __pyx_L3_error) __pyx_v_format = values[2]; __pyx_v_mode = values[3]; if (values[4]) { __pyx_v_allocate_buffer = __Pyx_PyObject_IsTrue(values[4]); if (unlikely((__pyx_v_allocate_buffer == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 122, __pyx_L3_error) } else { /* "View.MemoryView":122 * * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, * mode="c", bint allocate_buffer=True): # <<<<<<<<<<<<<< * * cdef int idx */ __pyx_v_allocate_buffer = ((int)1); } } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 121, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_shape), (&PyTuple_Type), 1, "shape", 1))) __PYX_ERR(1, 121, __pyx_L1_error) if (unlikely(((PyObject *)__pyx_v_format) == Py_None)) { PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "format"); __PYX_ERR(1, 121, __pyx_L1_error) } __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(((struct __pyx_array_obj *)__pyx_v_self), __pyx_v_shape, __pyx_v_itemsize, __pyx_v_format, __pyx_v_mode, __pyx_v_allocate_buffer); /* "View.MemoryView":121 * cdef bint dtype_is_object * * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< * mode="c", bint allocate_buffer=True): * */ /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer) { int __pyx_v_idx; Py_ssize_t __pyx_v_i; Py_ssize_t __pyx_v_dim; PyObject **__pyx_v_p; char __pyx_v_order; int __pyx_r; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; char *__pyx_t_6; int __pyx_t_7; Py_ssize_t __pyx_t_8; PyObject *__pyx_t_9 = NULL; PyObject *__pyx_t_10 = NULL; Py_ssize_t __pyx_t_11; __Pyx_RefNannySetupContext("__cinit__", 0); __Pyx_INCREF(__pyx_v_format); /* "View.MemoryView":128 * cdef PyObject **p * * self.ndim = <int> len(shape) # <<<<<<<<<<<<<< * self.itemsize = itemsize * */ if (unlikely(__pyx_v_shape == Py_None)) { PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()"); __PYX_ERR(1, 128, __pyx_L1_error) } __pyx_t_1 = PyTuple_GET_SIZE(__pyx_v_shape); if (unlikely(__pyx_t_1 == ((Py_ssize_t)-1))) __PYX_ERR(1, 128, __pyx_L1_error) __pyx_v_self->ndim = ((int)__pyx_t_1); /* "View.MemoryView":129 * * self.ndim = <int> len(shape) * self.itemsize = itemsize # <<<<<<<<<<<<<< * * if not self.ndim: */ __pyx_v_self->itemsize = __pyx_v_itemsize; /* "View.MemoryView":131 * self.itemsize = 
itemsize * * if not self.ndim: # <<<<<<<<<<<<<< * raise ValueError("Empty shape tuple for cython.array") * */ __pyx_t_2 = ((!(__pyx_v_self->ndim != 0)) != 0); if (unlikely(__pyx_t_2)) { /* "View.MemoryView":132 * * if not self.ndim: * raise ValueError("Empty shape tuple for cython.array") # <<<<<<<<<<<<<< * * if itemsize <= 0: */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 132, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 132, __pyx_L1_error) /* "View.MemoryView":131 * self.itemsize = itemsize * * if not self.ndim: # <<<<<<<<<<<<<< * raise ValueError("Empty shape tuple for cython.array") * */ } /* "View.MemoryView":134 * raise ValueError("Empty shape tuple for cython.array") * * if itemsize <= 0: # <<<<<<<<<<<<<< * raise ValueError("itemsize <= 0 for cython.array") * */ __pyx_t_2 = ((__pyx_v_itemsize <= 0) != 0); if (unlikely(__pyx_t_2)) { /* "View.MemoryView":135 * * if itemsize <= 0: * raise ValueError("itemsize <= 0 for cython.array") # <<<<<<<<<<<<<< * * if not isinstance(format, bytes): */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__6, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 135, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 135, __pyx_L1_error) /* "View.MemoryView":134 * raise ValueError("Empty shape tuple for cython.array") * * if itemsize <= 0: # <<<<<<<<<<<<<< * raise ValueError("itemsize <= 0 for cython.array") * */ } /* "View.MemoryView":137 * raise ValueError("itemsize <= 0 for cython.array") * * if not isinstance(format, bytes): # <<<<<<<<<<<<<< * format = format.encode('ASCII') * self._format = format # keep a reference to the byte string */ __pyx_t_2 = PyBytes_Check(__pyx_v_format); __pyx_t_4 = ((!(__pyx_t_2 != 0)) != 0); if (__pyx_t_4) { /* "View.MemoryView":138 * * if not isinstance(format, bytes): * format = format.encode('ASCII') # <<<<<<<<<<<<<< * self._format = format # keep a reference to the byte string * self.format = self._format */ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_format, __pyx_n_s_encode); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 138, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_tuple__7, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 138, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF_SET(__pyx_v_format, __pyx_t_5); __pyx_t_5 = 0; /* "View.MemoryView":137 * raise ValueError("itemsize <= 0 for cython.array") * * if not isinstance(format, bytes): # <<<<<<<<<<<<<< * format = format.encode('ASCII') * self._format = format # keep a reference to the byte string */ } /* "View.MemoryView":139 * if not isinstance(format, bytes): * format = format.encode('ASCII') * self._format = format # keep a reference to the byte string # <<<<<<<<<<<<<< * self.format = self._format * */ if (!(likely(PyBytes_CheckExact(__pyx_v_format))||((__pyx_v_format) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_v_format)->tp_name), 0))) __PYX_ERR(1, 139, __pyx_L1_error) __pyx_t_5 = __pyx_v_format; __Pyx_INCREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_5); __Pyx_GOTREF(__pyx_v_self->_format); __Pyx_DECREF(__pyx_v_self->_format); __pyx_v_self->_format = ((PyObject*)__pyx_t_5); __pyx_t_5 = 0; /* "View.MemoryView":140 * format = format.encode('ASCII') * self._format = format # keep a 
reference to the byte string * self.format = self._format # <<<<<<<<<<<<<< * * */ if (unlikely(__pyx_v_self->_format == Py_None)) { PyErr_SetString(PyExc_TypeError, "expected bytes, NoneType found"); __PYX_ERR(1, 140, __pyx_L1_error) } __pyx_t_6 = __Pyx_PyBytes_AsWritableString(__pyx_v_self->_format); if (unlikely((!__pyx_t_6) && PyErr_Occurred())) __PYX_ERR(1, 140, __pyx_L1_error) __pyx_v_self->format = __pyx_t_6; /* "View.MemoryView":143 * * * self._shape = <Py_ssize_t *> PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2) # <<<<<<<<<<<<<< * self._strides = self._shape + self.ndim * */ __pyx_v_self->_shape = ((Py_ssize_t *)PyObject_Malloc((((sizeof(Py_ssize_t)) * __pyx_v_self->ndim) * 2))); /* "View.MemoryView":144 * * self._shape = <Py_ssize_t *> PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2) * self._strides = self._shape + self.ndim # <<<<<<<<<<<<<< * * if not self._shape: */ __pyx_v_self->_strides = (__pyx_v_self->_shape + __pyx_v_self->ndim); /* "View.MemoryView":146 * self._strides = self._shape + self.ndim * * if not self._shape: # <<<<<<<<<<<<<< * raise MemoryError("unable to allocate shape and strides.") * */ __pyx_t_4 = ((!(__pyx_v_self->_shape != 0)) != 0); if (unlikely(__pyx_t_4)) { /* "View.MemoryView":147 * * if not self._shape: * raise MemoryError("unable to allocate shape and strides.") # <<<<<<<<<<<<<< * * */ __pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__8, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 147, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __PYX_ERR(1, 147, __pyx_L1_error) /* "View.MemoryView":146 * self._strides = self._shape + self.ndim * * if not self._shape: # <<<<<<<<<<<<<< * raise MemoryError("unable to allocate shape and strides.") * */ } /* "View.MemoryView":150 * * * for idx, dim in enumerate(shape): # <<<<<<<<<<<<<< * if dim <= 0: * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) */ __pyx_t_7 = 0; __pyx_t_5 = __pyx_v_shape; __Pyx_INCREF(__pyx_t_5); __pyx_t_1 = 0; for (;;) { if (__pyx_t_1 >= PyTuple_GET_SIZE(__pyx_t_5)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_5, __pyx_t_1); __Pyx_INCREF(__pyx_t_3); __pyx_t_1++; if (unlikely(0 < 0)) __PYX_ERR(1, 150, __pyx_L1_error) #else __pyx_t_3 = PySequence_ITEM(__pyx_t_5, __pyx_t_1); __pyx_t_1++; if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 150, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); #endif __pyx_t_8 = __Pyx_PyIndex_AsSsize_t(__pyx_t_3); if (unlikely((__pyx_t_8 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 150, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_dim = __pyx_t_8; __pyx_v_idx = __pyx_t_7; __pyx_t_7 = (__pyx_t_7 + 1); /* "View.MemoryView":151 * * for idx, dim in enumerate(shape): * if dim <= 0: # <<<<<<<<<<<<<< * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) * self._shape[idx] = dim */ __pyx_t_4 = ((__pyx_v_dim <= 0) != 0); if (unlikely(__pyx_t_4)) { /* "View.MemoryView":152 * for idx, dim in enumerate(shape): * if dim <= 0: * raise ValueError("Invalid shape in axis %d: %d." 
% (idx, dim)) # <<<<<<<<<<<<<< * self._shape[idx] = dim * */ __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_idx); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 152, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_9 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 152, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_10 = PyTuple_New(2); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 152, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_9); PyTuple_SET_ITEM(__pyx_t_10, 1, __pyx_t_9); __pyx_t_3 = 0; __pyx_t_9 = 0; __pyx_t_9 = __Pyx_PyString_Format(__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_t_10); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 152, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_t_10 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_9); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 152, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_Raise(__pyx_t_10, 0, 0, 0); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __PYX_ERR(1, 152, __pyx_L1_error) /* "View.MemoryView":151 * * for idx, dim in enumerate(shape): * if dim <= 0: # <<<<<<<<<<<<<< * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) * self._shape[idx] = dim */ } /* "View.MemoryView":153 * if dim <= 0: * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) * self._shape[idx] = dim # <<<<<<<<<<<<<< * * cdef char order */ (__pyx_v_self->_shape[__pyx_v_idx]) = __pyx_v_dim; /* "View.MemoryView":150 * * * for idx, dim in enumerate(shape): # <<<<<<<<<<<<<< * if dim <= 0: * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) */ } __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; /* "View.MemoryView":156 * * cdef char order * if mode == 'fortran': # <<<<<<<<<<<<<< * order = b'F' * self.mode = u'fortran' */ __pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_fortran, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(1, 156, __pyx_L1_error) if (__pyx_t_4) { /* "View.MemoryView":157 * cdef char order * if mode == 'fortran': * order = b'F' # <<<<<<<<<<<<<< * self.mode = u'fortran' * elif mode == 'c': */ __pyx_v_order = 'F'; /* "View.MemoryView":158 * if mode == 'fortran': * order = b'F' * self.mode = u'fortran' # <<<<<<<<<<<<<< * elif mode == 'c': * order = b'C' */ __Pyx_INCREF(__pyx_n_u_fortran); __Pyx_GIVEREF(__pyx_n_u_fortran); __Pyx_GOTREF(__pyx_v_self->mode); __Pyx_DECREF(__pyx_v_self->mode); __pyx_v_self->mode = __pyx_n_u_fortran; /* "View.MemoryView":156 * * cdef char order * if mode == 'fortran': # <<<<<<<<<<<<<< * order = b'F' * self.mode = u'fortran' */ goto __pyx_L10; } /* "View.MemoryView":159 * order = b'F' * self.mode = u'fortran' * elif mode == 'c': # <<<<<<<<<<<<<< * order = b'C' * self.mode = u'c' */ __pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_c, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(1, 159, __pyx_L1_error) if (likely(__pyx_t_4)) { /* "View.MemoryView":160 * self.mode = u'fortran' * elif mode == 'c': * order = b'C' # <<<<<<<<<<<<<< * self.mode = u'c' * else: */ __pyx_v_order = 'C'; /* "View.MemoryView":161 * elif mode == 'c': * order = b'C' * self.mode = u'c' # <<<<<<<<<<<<<< * else: * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) */ __Pyx_INCREF(__pyx_n_u_c); __Pyx_GIVEREF(__pyx_n_u_c); __Pyx_GOTREF(__pyx_v_self->mode); __Pyx_DECREF(__pyx_v_self->mode); __pyx_v_self->mode = __pyx_n_u_c; /* "View.MemoryView":159 * order = b'F' * self.mode = u'fortran' * elif mode == 'c': # 
<<<<<<<<<<<<<< * order = b'C' * self.mode = u'c' */ goto __pyx_L10; } /* "View.MemoryView":163 * self.mode = u'c' * else: * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) # <<<<<<<<<<<<<< * * self.len = fill_contig_strides_array(self._shape, self._strides, */ /*else*/ { __pyx_t_5 = __Pyx_PyString_Format(__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_v_mode); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 163, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_10 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_5); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 163, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_Raise(__pyx_t_10, 0, 0, 0); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __PYX_ERR(1, 163, __pyx_L1_error) } __pyx_L10:; /* "View.MemoryView":165 * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) * * self.len = fill_contig_strides_array(self._shape, self._strides, # <<<<<<<<<<<<<< * itemsize, self.ndim, order) * */ __pyx_v_self->len = __pyx_fill_contig_strides_array(__pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_itemsize, __pyx_v_self->ndim, __pyx_v_order); /* "View.MemoryView":168 * itemsize, self.ndim, order) * * self.free_data = allocate_buffer # <<<<<<<<<<<<<< * self.dtype_is_object = format == b'O' * if allocate_buffer: */ __pyx_v_self->free_data = __pyx_v_allocate_buffer; /* "View.MemoryView":169 * * self.free_data = allocate_buffer * self.dtype_is_object = format == b'O' # <<<<<<<<<<<<<< * if allocate_buffer: * */ __pyx_t_10 = PyObject_RichCompare(__pyx_v_format, __pyx_n_b_O, Py_EQ); __Pyx_XGOTREF(__pyx_t_10); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 169, __pyx_L1_error) __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_10); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 169, __pyx_L1_error) __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_v_self->dtype_is_object = __pyx_t_4; /* "View.MemoryView":170 * self.free_data = allocate_buffer * self.dtype_is_object = format == b'O' * if allocate_buffer: # <<<<<<<<<<<<<< * * */ __pyx_t_4 = (__pyx_v_allocate_buffer != 0); if (__pyx_t_4) { /* "View.MemoryView":173 * * * self.data = <char *>malloc(self.len) # <<<<<<<<<<<<<< * if not self.data: * raise MemoryError("unable to allocate array data.") */ __pyx_v_self->data = ((char *)malloc(__pyx_v_self->len)); /* "View.MemoryView":174 * * self.data = <char *>malloc(self.len) * if not self.data: # <<<<<<<<<<<<<< * raise MemoryError("unable to allocate array data.") * */ __pyx_t_4 = ((!(__pyx_v_self->data != 0)) != 0); if (unlikely(__pyx_t_4)) { /* "View.MemoryView":175 * self.data = <char *>malloc(self.len) * if not self.data: * raise MemoryError("unable to allocate array data.") # <<<<<<<<<<<<<< * * if self.dtype_is_object: */ __pyx_t_10 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__9, NULL); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 175, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_Raise(__pyx_t_10, 0, 0, 0); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __PYX_ERR(1, 175, __pyx_L1_error) /* "View.MemoryView":174 * * self.data = <char *>malloc(self.len) * if not self.data: # <<<<<<<<<<<<<< * raise MemoryError("unable to allocate array data.") * */ } /* "View.MemoryView":177 * raise MemoryError("unable to allocate array data.") * * if self.dtype_is_object: # <<<<<<<<<<<<<< * p = <PyObject **> self.data * for i in range(self.len / itemsize): */ __pyx_t_4 = (__pyx_v_self->dtype_is_object != 0); if (__pyx_t_4) { /* "View.MemoryView":178 * * if self.dtype_is_object: * p = 
<PyObject **> self.data # <<<<<<<<<<<<<< * for i in range(self.len / itemsize): * p[i] = Py_None */ __pyx_v_p = ((PyObject **)__pyx_v_self->data); /* "View.MemoryView":179 * if self.dtype_is_object: * p = <PyObject **> self.data * for i in range(self.len / itemsize): # <<<<<<<<<<<<<< * p[i] = Py_None * Py_INCREF(Py_None) */ if (unlikely(__pyx_v_itemsize == 0)) { PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); __PYX_ERR(1, 179, __pyx_L1_error) } else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && unlikely(__pyx_v_itemsize == (Py_ssize_t)-1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_self->len))) { PyErr_SetString(PyExc_OverflowError, "value too large to perform division"); __PYX_ERR(1, 179, __pyx_L1_error) } __pyx_t_1 = __Pyx_div_Py_ssize_t(__pyx_v_self->len, __pyx_v_itemsize); __pyx_t_8 = __pyx_t_1; for (__pyx_t_11 = 0; __pyx_t_11 < __pyx_t_8; __pyx_t_11+=1) { __pyx_v_i = __pyx_t_11; /* "View.MemoryView":180 * p = <PyObject **> self.data * for i in range(self.len / itemsize): * p[i] = Py_None # <<<<<<<<<<<<<< * Py_INCREF(Py_None) * */ (__pyx_v_p[__pyx_v_i]) = Py_None; /* "View.MemoryView":181 * for i in range(self.len / itemsize): * p[i] = Py_None * Py_INCREF(Py_None) # <<<<<<<<<<<<<< * * @cname('getbuffer') */ Py_INCREF(Py_None); } /* "View.MemoryView":177 * raise MemoryError("unable to allocate array data.") * * if self.dtype_is_object: # <<<<<<<<<<<<<< * p = <PyObject **> self.data * for i in range(self.len / itemsize): */ } /* "View.MemoryView":170 * self.free_data = allocate_buffer * self.dtype_is_object = format == b'O' * if allocate_buffer: # <<<<<<<<<<<<<< * * */ } /* "View.MemoryView":121 * cdef bint dtype_is_object * * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< * mode="c", bint allocate_buffer=True): * */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_9); __Pyx_XDECREF(__pyx_t_10); __Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_XDECREF(__pyx_v_format); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":184 * * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< * cdef int bufmode = -1 * if self.mode == u"c": */ /* Python wrapper */ static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(((struct __pyx_array_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_v_bufmode; int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; char *__pyx_t_4; Py_ssize_t __pyx_t_5; int __pyx_t_6; Py_ssize_t *__pyx_t_7; if (__pyx_v_info == NULL) { PyErr_SetString(PyExc_BufferError, "PyObject_GetBuffer: view==NULL argument is obsolete"); return -1; } __Pyx_RefNannySetupContext("__getbuffer__", 0); 
__pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); __Pyx_GIVEREF(__pyx_v_info->obj); /* "View.MemoryView":185 * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): * cdef int bufmode = -1 # <<<<<<<<<<<<<< * if self.mode == u"c": * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS */ __pyx_v_bufmode = -1; /* "View.MemoryView":186 * def __getbuffer__(self, Py_buffer *info, int flags): * cdef int bufmode = -1 * if self.mode == u"c": # <<<<<<<<<<<<<< * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": */ __pyx_t_1 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_c, Py_EQ)); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 186, __pyx_L1_error) __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":187 * cdef int bufmode = -1 * if self.mode == u"c": * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<< * elif self.mode == u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS */ __pyx_v_bufmode = (PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS); /* "View.MemoryView":186 * def __getbuffer__(self, Py_buffer *info, int flags): * cdef int bufmode = -1 * if self.mode == u"c": # <<<<<<<<<<<<<< * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": */ goto __pyx_L3; } /* "View.MemoryView":188 * if self.mode == u"c": * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": # <<<<<<<<<<<<<< * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): */ __pyx_t_2 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_fortran, Py_EQ)); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(1, 188, __pyx_L1_error) __pyx_t_1 = (__pyx_t_2 != 0); if (__pyx_t_1) { /* "View.MemoryView":189 * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<< * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") */ __pyx_v_bufmode = (PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS); /* "View.MemoryView":188 * if self.mode == u"c": * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": # <<<<<<<<<<<<<< * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): */ } __pyx_L3:; /* "View.MemoryView":190 * elif self.mode == u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): # <<<<<<<<<<<<<< * raise ValueError("Can only create a buffer that is contiguous in memory.") * info.buf = self.data */ __pyx_t_1 = ((!((__pyx_v_flags & __pyx_v_bufmode) != 0)) != 0); if (unlikely(__pyx_t_1)) { /* "View.MemoryView":191 * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") # <<<<<<<<<<<<<< * info.buf = self.data * info.len = self.len */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__10, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 191, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 191, __pyx_L1_error) /* "View.MemoryView":190 * elif self.mode == u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): # <<<<<<<<<<<<<< * raise ValueError("Can only create a buffer that is contiguous in memory.") * info.buf = self.data */ } /* "View.MemoryView":192 * if not (flags & bufmode): * raise ValueError("Can only 
create a buffer that is contiguous in memory.") * info.buf = self.data # <<<<<<<<<<<<<< * info.len = self.len * info.ndim = self.ndim */ __pyx_t_4 = __pyx_v_self->data; __pyx_v_info->buf = __pyx_t_4; /* "View.MemoryView":193 * raise ValueError("Can only create a buffer that is contiguous in memory.") * info.buf = self.data * info.len = self.len # <<<<<<<<<<<<<< * info.ndim = self.ndim * info.shape = self._shape */ __pyx_t_5 = __pyx_v_self->len; __pyx_v_info->len = __pyx_t_5; /* "View.MemoryView":194 * info.buf = self.data * info.len = self.len * info.ndim = self.ndim # <<<<<<<<<<<<<< * info.shape = self._shape * info.strides = self._strides */ __pyx_t_6 = __pyx_v_self->ndim; __pyx_v_info->ndim = __pyx_t_6; /* "View.MemoryView":195 * info.len = self.len * info.ndim = self.ndim * info.shape = self._shape # <<<<<<<<<<<<<< * info.strides = self._strides * info.suboffsets = NULL */ __pyx_t_7 = __pyx_v_self->_shape; __pyx_v_info->shape = __pyx_t_7; /* "View.MemoryView":196 * info.ndim = self.ndim * info.shape = self._shape * info.strides = self._strides # <<<<<<<<<<<<<< * info.suboffsets = NULL * info.itemsize = self.itemsize */ __pyx_t_7 = __pyx_v_self->_strides; __pyx_v_info->strides = __pyx_t_7; /* "View.MemoryView":197 * info.shape = self._shape * info.strides = self._strides * info.suboffsets = NULL # <<<<<<<<<<<<<< * info.itemsize = self.itemsize * info.readonly = 0 */ __pyx_v_info->suboffsets = NULL; /* "View.MemoryView":198 * info.strides = self._strides * info.suboffsets = NULL * info.itemsize = self.itemsize # <<<<<<<<<<<<<< * info.readonly = 0 * */ __pyx_t_5 = __pyx_v_self->itemsize; __pyx_v_info->itemsize = __pyx_t_5; /* "View.MemoryView":199 * info.suboffsets = NULL * info.itemsize = self.itemsize * info.readonly = 0 # <<<<<<<<<<<<<< * * if flags & PyBUF_FORMAT: */ __pyx_v_info->readonly = 0; /* "View.MemoryView":201 * info.readonly = 0 * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * info.format = self.format * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); if (__pyx_t_1) { /* "View.MemoryView":202 * * if flags & PyBUF_FORMAT: * info.format = self.format # <<<<<<<<<<<<<< * else: * info.format = NULL */ __pyx_t_4 = __pyx_v_self->format; __pyx_v_info->format = __pyx_t_4; /* "View.MemoryView":201 * info.readonly = 0 * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * info.format = self.format * else: */ goto __pyx_L5; } /* "View.MemoryView":204 * info.format = self.format * else: * info.format = NULL # <<<<<<<<<<<<<< * * info.obj = self */ /*else*/ { __pyx_v_info->format = NULL; } __pyx_L5:; /* "View.MemoryView":206 * info.format = NULL * * info.obj = self # <<<<<<<<<<<<<< * * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") */ __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = ((PyObject *)__pyx_v_self); /* "View.MemoryView":184 * * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< * cdef int bufmode = -1 * if self.mode == u"c": */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.array.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; if (__pyx_v_info->obj != NULL) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; } goto __pyx_L2; __pyx_L0:; if (__pyx_v_info->obj == Py_None) { __Pyx_GOTREF(__pyx_v_info->obj); 
__Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; } __pyx_L2:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":210 * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") * * def __dealloc__(array self): # <<<<<<<<<<<<<< * if self.callback_free_data != NULL: * self.callback_free_data(self.data) */ /* Python wrapper */ static void __pyx_array___dealloc__(PyObject *__pyx_v_self); /*proto*/ static void __pyx_array___dealloc__(PyObject *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(((struct __pyx_array_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self) { __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("__dealloc__", 0); /* "View.MemoryView":211 * * def __dealloc__(array self): * if self.callback_free_data != NULL: # <<<<<<<<<<<<<< * self.callback_free_data(self.data) * elif self.free_data: */ __pyx_t_1 = ((__pyx_v_self->callback_free_data != NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":212 * def __dealloc__(array self): * if self.callback_free_data != NULL: * self.callback_free_data(self.data) # <<<<<<<<<<<<<< * elif self.free_data: * if self.dtype_is_object: */ __pyx_v_self->callback_free_data(__pyx_v_self->data); /* "View.MemoryView":211 * * def __dealloc__(array self): * if self.callback_free_data != NULL: # <<<<<<<<<<<<<< * self.callback_free_data(self.data) * elif self.free_data: */ goto __pyx_L3; } /* "View.MemoryView":213 * if self.callback_free_data != NULL: * self.callback_free_data(self.data) * elif self.free_data: # <<<<<<<<<<<<<< * if self.dtype_is_object: * refcount_objects_in_slice(self.data, self._shape, */ __pyx_t_1 = (__pyx_v_self->free_data != 0); if (__pyx_t_1) { /* "View.MemoryView":214 * self.callback_free_data(self.data) * elif self.free_data: * if self.dtype_is_object: # <<<<<<<<<<<<<< * refcount_objects_in_slice(self.data, self._shape, * self._strides, self.ndim, False) */ __pyx_t_1 = (__pyx_v_self->dtype_is_object != 0); if (__pyx_t_1) { /* "View.MemoryView":215 * elif self.free_data: * if self.dtype_is_object: * refcount_objects_in_slice(self.data, self._shape, # <<<<<<<<<<<<<< * self._strides, self.ndim, False) * free(self.data) */ __pyx_memoryview_refcount_objects_in_slice(__pyx_v_self->data, __pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_self->ndim, 0); /* "View.MemoryView":214 * self.callback_free_data(self.data) * elif self.free_data: * if self.dtype_is_object: # <<<<<<<<<<<<<< * refcount_objects_in_slice(self.data, self._shape, * self._strides, self.ndim, False) */ } /* "View.MemoryView":217 * refcount_objects_in_slice(self.data, self._shape, * self._strides, self.ndim, False) * free(self.data) # <<<<<<<<<<<<<< * PyObject_Free(self._shape) * */ free(__pyx_v_self->data); /* "View.MemoryView":213 * if self.callback_free_data != NULL: * self.callback_free_data(self.data) * elif self.free_data: # <<<<<<<<<<<<<< * if self.dtype_is_object: * refcount_objects_in_slice(self.data, self._shape, */ } __pyx_L3:; /* "View.MemoryView":218 * self._strides, self.ndim, False) * free(self.data) * PyObject_Free(self._shape) # <<<<<<<<<<<<<< * * @property */ PyObject_Free(__pyx_v_self->_shape); /* "View.MemoryView":210 * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") * * def 
__dealloc__(array self): # <<<<<<<<<<<<<< * if self.callback_free_data != NULL: * self.callback_free_data(self.data) */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":221 * * @property * def memview(self): # <<<<<<<<<<<<<< * return self.get_memview() * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_5array_7memview___get__(((struct __pyx_array_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":222 * @property * def memview(self): * return self.get_memview() # <<<<<<<<<<<<<< * * @cname('get_memview') */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = ((struct __pyx_vtabstruct_array *)__pyx_v_self->__pyx_vtab)->get_memview(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 222, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":221 * * @property * def memview(self): # <<<<<<<<<<<<<< * return self.get_memview() * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.array.memview.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":225 * * @cname('get_memview') * cdef get_memview(self): # <<<<<<<<<<<<<< * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE * return memoryview(self, flags, self.dtype_is_object) */ static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self) { int __pyx_v_flags; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; __Pyx_RefNannySetupContext("get_memview", 0); /* "View.MemoryView":226 * @cname('get_memview') * cdef get_memview(self): * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE # <<<<<<<<<<<<<< * return memoryview(self, flags, self.dtype_is_object) * */ __pyx_v_flags = ((PyBUF_ANY_CONTIGUOUS | PyBUF_FORMAT) | PyBUF_WRITABLE); /* "View.MemoryView":227 * cdef get_memview(self): * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE * return memoryview(self, flags, self.dtype_is_object) # <<<<<<<<<<<<<< * * def __len__(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 227, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 227, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 227, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_t_2 = 
__Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 227, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":225 * * @cname('get_memview') * cdef get_memview(self): # <<<<<<<<<<<<<< * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE * return memoryview(self, flags, self.dtype_is_object) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.array.get_memview", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":229 * return memoryview(self, flags, self.dtype_is_object) * * def __len__(self): # <<<<<<<<<<<<<< * return self._shape[0] * */ /* Python wrapper */ static Py_ssize_t __pyx_array___len__(PyObject *__pyx_v_self); /*proto*/ static Py_ssize_t __pyx_array___len__(PyObject *__pyx_v_self) { Py_ssize_t __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__len__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(((struct __pyx_array_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static Py_ssize_t __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(struct __pyx_array_obj *__pyx_v_self) { Py_ssize_t __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__len__", 0); /* "View.MemoryView":230 * * def __len__(self): * return self._shape[0] # <<<<<<<<<<<<<< * * def __getattr__(self, attr): */ __pyx_r = (__pyx_v_self->_shape[0]); goto __pyx_L0; /* "View.MemoryView":229 * return memoryview(self, flags, self.dtype_is_object) * * def __len__(self): # <<<<<<<<<<<<<< * return self._shape[0] * */ /* function exit code */ __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":232 * return self._shape[0] * * def __getattr__(self, attr): # <<<<<<<<<<<<<< * return getattr(self.memview, attr) * */ /* Python wrapper */ static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr); /*proto*/ static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getattr__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_attr)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("__getattr__", 0); /* "View.MemoryView":233 * * def __getattr__(self, attr): * return getattr(self.memview, attr) # <<<<<<<<<<<<<< * * def __getitem__(self, item): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 233, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_GetAttr(__pyx_t_1, __pyx_v_attr); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 233, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* 
"View.MemoryView":232 * return self._shape[0] * * def __getattr__(self, attr): # <<<<<<<<<<<<<< * return getattr(self.memview, attr) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.array.__getattr__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":235 * return getattr(self.memview, attr) * * def __getitem__(self, item): # <<<<<<<<<<<<<< * return self.memview[item] * */ /* Python wrapper */ static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item); /*proto*/ static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("__getitem__", 0); /* "View.MemoryView":236 * * def __getitem__(self, item): * return self.memview[item] # <<<<<<<<<<<<<< * * def __setitem__(self, item, value): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 236, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_v_item); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 236, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":235 * return getattr(self.memview, attr) * * def __getitem__(self, item): # <<<<<<<<<<<<<< * return self.memview[item] * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.array.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":238 * return self.memview[item] * * def __setitem__(self, item, value): # <<<<<<<<<<<<<< * self.memview[item] = value * */ /* Python wrapper */ static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /*proto*/ static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item), ((PyObject *)__pyx_v_value)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__setitem__", 0); /* "View.MemoryView":239 * * def __setitem__(self, item, value): * self.memview[item] = value # 
<<<<<<<<<<<<<< * * */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 239, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (unlikely(PyObject_SetItem(__pyx_t_1, __pyx_v_item, __pyx_v_value) < 0)) __PYX_ERR(1, 239, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":238 * return self.memview[item] * * def __setitem__(self, item, value): # <<<<<<<<<<<<<< * self.memview[item] = value * */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.array.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_array_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_pw___pyx_array_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_array___reduce_cython__(((struct __pyx_array_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_array___reduce_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__11, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 2, __pyx_L1_error) /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.array.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_array_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ static PyObject *__pyx_pw___pyx_array_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_array_2__setstate_cython__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); /* function exit code */ __Pyx_RefNannyFinishContext(); 
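/* Pickling is deliberately unsupported for `array`: its __cinit__ takes
   required shape/itemsize/format arguments and may allocate its own data
   buffer, so Cython cannot synthesise a default __reduce__ for it.  Both
   __reduce_cython__ above and the __setstate_cython__ implementation below
   therefore raise TypeError unconditionally; at the Python level,
   pickle.dumps() on such an object fails with
   "no default __reduce__ due to non-trivial __cinit__". */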
return __pyx_r;
}

static PyObject *__pyx_pf___pyx_array_2__setstate_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":4
 *     raise TypeError("no default __reduce__ due to non-trivial __cinit__")
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError("no default __reduce__ due to non-trivial __cinit__")             # <<<<<<<<<<<<<<
 */
  __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__12, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_Raise(__pyx_t_1, 0, 0, 0);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __PYX_ERR(1, 4, __pyx_L1_error)

  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError("no default __reduce__ due to non-trivial __cinit__")
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError("no default __reduce__ due to non-trivial __cinit__")
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("View.MemoryView.array.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "View.MemoryView":243
 *
 * @cname("__pyx_array_new")
 * cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format,             # <<<<<<<<<<<<<<
 *                           char *mode, char *buf):
 *     cdef array result
 */

static struct __pyx_array_obj *__pyx_array_new(PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, char *__pyx_v_format, char *__pyx_v_mode, char *__pyx_v_buf) {
  struct __pyx_array_obj *__pyx_v_result = 0;
  struct __pyx_array_obj *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  __Pyx_RefNannySetupContext("array_cwrapper", 0);

  /* "View.MemoryView":247
 *     cdef array result
 *
 *     if buf == NULL:             # <<<<<<<<<<<<<<
 *         result = array(shape, itemsize, format, mode.decode('ASCII'))
 *     else:
 */
  __pyx_t_1 = ((__pyx_v_buf == NULL) != 0);
  if (__pyx_t_1) {

    /* "View.MemoryView":248
 *
 *     if buf == NULL:
 *         result = array(shape, itemsize, format, mode.decode('ASCII'))             # <<<<<<<<<<<<<<
 *     else:
 *         result = array(shape, itemsize, format, mode.decode('ASCII'),
 */
    __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 248, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_t_3 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 248, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_4 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 248, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = PyTuple_New(4); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 248, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_INCREF(__pyx_v_shape);
    __Pyx_GIVEREF(__pyx_v_shape);
    PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_shape);
    __Pyx_GIVEREF(__pyx_t_2);
    PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_2);
    __Pyx_GIVEREF(__pyx_t_3);
    PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_t_3);
    __Pyx_GIVEREF(__pyx_t_4);
    PyTuple_SET_ITEM(__pyx_t_5, 3, __pyx_t_4);
    __pyx_t_2 = 0; __pyx_t_3 = 0; __pyx_t_4 = 0;
    __pyx_t_4 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_t_5, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 248, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
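    /* Reference bookkeeping above follows the CPython tuple-packing idiom:
       PyTuple_SET_ITEM steals a reference, so each temporary is GIVEREF'ed
       into the argument tuple and then zeroed rather than DECREF'ed.  This
       branch (buf == NULL) lets array.__cinit__ allocate and own the data
       buffer; the else branch below instead passes allocate_buffer=False
       and points result.data at the caller-supplied buffer, whose lifetime
       remains the caller's responsibility. */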
__pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_4); __pyx_t_4 = 0; /* "View.MemoryView":247 * cdef array result * * if buf == NULL: # <<<<<<<<<<<<<< * result = array(shape, itemsize, format, mode.decode('ASCII')) * else: */ goto __pyx_L3; } /* "View.MemoryView":250 * result = array(shape, itemsize, format, mode.decode('ASCII')) * else: * result = array(shape, itemsize, format, mode.decode('ASCII'), # <<<<<<<<<<<<<< * allocate_buffer=False) * result.data = buf */ /*else*/ { __pyx_t_4 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 250, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 250, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 250, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = PyTuple_New(4); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 250, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_v_shape); __Pyx_GIVEREF(__pyx_v_shape); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_shape); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_2, 2, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_2, 3, __pyx_t_3); __pyx_t_4 = 0; __pyx_t_5 = 0; __pyx_t_3 = 0; /* "View.MemoryView":251 * else: * result = array(shape, itemsize, format, mode.decode('ASCII'), * allocate_buffer=False) # <<<<<<<<<<<<<< * result.data = buf * */ __pyx_t_3 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 251, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_allocate_buffer, Py_False) < 0) __PYX_ERR(1, 251, __pyx_L1_error) /* "View.MemoryView":250 * result = array(shape, itemsize, format, mode.decode('ASCII')) * else: * result = array(shape, itemsize, format, mode.decode('ASCII'), # <<<<<<<<<<<<<< * allocate_buffer=False) * result.data = buf */ __pyx_t_5 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 250, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_5); __pyx_t_5 = 0; /* "View.MemoryView":252 * result = array(shape, itemsize, format, mode.decode('ASCII'), * allocate_buffer=False) * result.data = buf # <<<<<<<<<<<<<< * * return result */ __pyx_v_result->data = __pyx_v_buf; } __pyx_L3:; /* "View.MemoryView":254 * result.data = buf * * return result # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(((PyObject *)__pyx_r)); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = __pyx_v_result; goto __pyx_L0; /* "View.MemoryView":243 * * @cname("__pyx_array_new") * cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, # <<<<<<<<<<<<<< * char *mode, char *buf): * cdef array result */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.array_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XGIVEREF((PyObject *)__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":280 * cdef class Enum(object): * cdef object name * def __init__(self, name): # <<<<<<<<<<<<<< * self.name = name * def __repr__(self): */ /* 
Python wrapper */ static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_name = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__init__ (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_name,0}; PyObject* values[1] = {0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_name)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__init__") < 0)) __PYX_ERR(1, 280, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 1) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); } __pyx_v_name = values[0]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__init__", 1, 1, 1, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 280, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("View.MemoryView.Enum.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), __pyx_v_name); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__init__", 0); /* "View.MemoryView":281 * cdef object name * def __init__(self, name): * self.name = name # <<<<<<<<<<<<<< * def __repr__(self): * return self.name */ __Pyx_INCREF(__pyx_v_name); __Pyx_GIVEREF(__pyx_v_name); __Pyx_GOTREF(__pyx_v_self->name); __Pyx_DECREF(__pyx_v_self->name); __pyx_v_self->name = __pyx_v_name; /* "View.MemoryView":280 * cdef class Enum(object): * cdef object name * def __init__(self, name): # <<<<<<<<<<<<<< * self.name = name * def __repr__(self): */ /* function exit code */ __pyx_r = 0; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":282 * def __init__(self, name): * self.name = name * def __repr__(self): # <<<<<<<<<<<<<< * return self.name * */ /* Python wrapper */ static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0); __pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__repr__", 0); /* "View.MemoryView":283 * self.name = name * def __repr__(self): * return self.name # <<<<<<<<<<<<<< * * cdef 
generic = Enum("<strided and direct or indirect>") */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_self->name); __pyx_r = __pyx_v_self->name; goto __pyx_L0; /* "View.MemoryView":282 * def __init__(self, name): * self.name = name * def __repr__(self): # <<<<<<<<<<<<<< * return self.name * */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * cdef bint use_setstate * state = (self.name,) */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_MemviewEnum_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_pw___pyx_MemviewEnum_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_MemviewEnum___reduce_cython__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_MemviewEnum___reduce_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self) { int __pyx_v_use_setstate; PyObject *__pyx_v_state = NULL; PyObject *__pyx_v__dict = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; __Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":3 * def __reduce_cython__(self): * cdef bint use_setstate * state = (self.name,) # <<<<<<<<<<<<<< * _dict = getattr(self, '__dict__', None) * if _dict is not None: */ __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 3, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_v_self->name); __Pyx_GIVEREF(__pyx_v_self->name); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_self->name); __pyx_v_state = ((PyObject*)__pyx_t_1); __pyx_t_1 = 0; /* "(tree fragment)":4 * cdef bint use_setstate * state = (self.name,) * _dict = getattr(self, '__dict__', None) # <<<<<<<<<<<<<< * if _dict is not None: * state += (_dict,) */ __pyx_t_1 = __Pyx_GetAttr3(((PyObject *)__pyx_v_self), __pyx_n_s_dict, Py_None); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v__dict = __pyx_t_1; __pyx_t_1 = 0; /* "(tree fragment)":5 * state = (self.name,) * _dict = getattr(self, '__dict__', None) * if _dict is not None: # <<<<<<<<<<<<<< * state += (_dict,) * use_setstate = True */ __pyx_t_2 = (__pyx_v__dict != Py_None); __pyx_t_3 = (__pyx_t_2 != 0); if (__pyx_t_3) { /* "(tree fragment)":6 * _dict = getattr(self, '__dict__', None) * if _dict is not None: * state += (_dict,) # <<<<<<<<<<<<<< * use_setstate = True * else: */ __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 6, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_v__dict); __Pyx_GIVEREF(__pyx_v__dict); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v__dict); __pyx_t_4 = PyNumber_InPlaceAdd(__pyx_v_state, __pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 6, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF_SET(__pyx_v_state, ((PyObject*)__pyx_t_4)); __pyx_t_4 = 0; /* "(tree fragment)":7 * if _dict is not None: * state += (_dict,) * use_setstate = True # <<<<<<<<<<<<<< * else: * use_setstate = self.name is not None */ __pyx_v_use_setstate = 1; /* "(tree fragment)":5 * state = (self.name,) * _dict = getattr(self, '__dict__', None) * if _dict is not None: # 
<<<<<<<<<<<<<< * state += (_dict,) * use_setstate = True */ goto __pyx_L3; } /* "(tree fragment)":9 * use_setstate = True * else: * use_setstate = self.name is not None # <<<<<<<<<<<<<< * if use_setstate: * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state */ /*else*/ { __pyx_t_3 = (__pyx_v_self->name != Py_None); __pyx_v_use_setstate = __pyx_t_3; } __pyx_L3:; /* "(tree fragment)":10 * else: * use_setstate = self.name is not None * if use_setstate: # <<<<<<<<<<<<<< * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state * else: */ __pyx_t_3 = (__pyx_v_use_setstate != 0); if (__pyx_t_3) { /* "(tree fragment)":11 * use_setstate = self.name is not None * if use_setstate: * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state # <<<<<<<<<<<<<< * else: * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) */ __Pyx_XDECREF(__pyx_r); __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s_pyx_unpickle_Enum); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 11, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 11, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); __Pyx_INCREF(__pyx_int_184977713); __Pyx_GIVEREF(__pyx_int_184977713); PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_184977713); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); PyTuple_SET_ITEM(__pyx_t_1, 2, Py_None); __pyx_t_5 = PyTuple_New(3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 11, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_1); __Pyx_INCREF(__pyx_v_state); __Pyx_GIVEREF(__pyx_v_state); PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_v_state); __pyx_t_4 = 0; __pyx_t_1 = 0; __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "(tree fragment)":10 * else: * use_setstate = self.name is not None * if use_setstate: # <<<<<<<<<<<<<< * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state * else: */ } /* "(tree fragment)":13 * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state * else: * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * __pyx_unpickle_Enum__set_state(self, __pyx_state) */ /*else*/ { __Pyx_XDECREF(__pyx_r); __pyx_t_5 = __Pyx_GetModuleGlobalName(__pyx_n_s_pyx_unpickle_Enum); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 13, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 13, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); __Pyx_INCREF(__pyx_int_184977713); __Pyx_GIVEREF(__pyx_int_184977713); PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_184977713); __Pyx_INCREF(__pyx_v_state); __Pyx_GIVEREF(__pyx_v_state); PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_v_state); __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 13, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_1); __pyx_t_5 = 0; __pyx_t_1 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; } /* 
"(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * cdef bint use_setstate * state = (self.name,) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.Enum.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_state); __Pyx_XDECREF(__pyx_v__dict); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":14 * else: * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * __pyx_unpickle_Enum__set_state(self, __pyx_state) */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_MemviewEnum_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ static PyObject *__pyx_pw___pyx_MemviewEnum_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_MemviewEnum_2__setstate_cython__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_MemviewEnum_2__setstate_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":15 * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) * def __setstate_cython__(self, __pyx_state): * __pyx_unpickle_Enum__set_state(self, __pyx_state) # <<<<<<<<<<<<<< */ if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(1, 15, __pyx_L1_error) __pyx_t_1 = __pyx_unpickle_Enum__set_state(__pyx_v_self, ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 15, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "(tree fragment)":14 * else: * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * __pyx_unpickle_Enum__set_state(self, __pyx_state) */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.Enum.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":297 * * @cname('__pyx_align_pointer') * cdef void *align_pointer(void *memory, size_t alignment) nogil: # <<<<<<<<<<<<<< * "Align pointer memory on a given boundary" * cdef Py_intptr_t aligned_p = <Py_intptr_t> memory */ static void *__pyx_align_pointer(void *__pyx_v_memory, size_t __pyx_v_alignment) { Py_intptr_t __pyx_v_aligned_p; size_t __pyx_v_offset; void *__pyx_r; int __pyx_t_1; /* "View.MemoryView":299 * cdef void *align_pointer(void *memory, size_t alignment) nogil: * "Align pointer memory on a given boundary" * cdef Py_intptr_t aligned_p = <Py_intptr_t> memory # <<<<<<<<<<<<<< * cdef size_t offset * */ __pyx_v_aligned_p = ((Py_intptr_t)__pyx_v_memory); /* "View.MemoryView":303 * * with cython.cdivision(True): * 
offset = aligned_p % alignment # <<<<<<<<<<<<<< * * if offset > 0: */ __pyx_v_offset = (__pyx_v_aligned_p % __pyx_v_alignment); /* "View.MemoryView":305 * offset = aligned_p % alignment * * if offset > 0: # <<<<<<<<<<<<<< * aligned_p += alignment - offset * */ __pyx_t_1 = ((__pyx_v_offset > 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":306 * * if offset > 0: * aligned_p += alignment - offset # <<<<<<<<<<<<<< * * return <void *> aligned_p */ __pyx_v_aligned_p = (__pyx_v_aligned_p + (__pyx_v_alignment - __pyx_v_offset)); /* "View.MemoryView":305 * offset = aligned_p % alignment * * if offset > 0: # <<<<<<<<<<<<<< * aligned_p += alignment - offset * */ } /* "View.MemoryView":308 * aligned_p += alignment - offset * * return <void *> aligned_p # <<<<<<<<<<<<<< * * */ __pyx_r = ((void *)__pyx_v_aligned_p); goto __pyx_L0; /* "View.MemoryView":297 * * @cname('__pyx_align_pointer') * cdef void *align_pointer(void *memory, size_t alignment) nogil: # <<<<<<<<<<<<<< * "Align pointer memory on a given boundary" * cdef Py_intptr_t aligned_p = <Py_intptr_t> memory */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":344 * cdef __Pyx_TypeInfo *typeinfo * * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<< * self.obj = obj * self.flags = flags */ /* Python wrapper */ static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_obj = 0; int __pyx_v_flags; int __pyx_v_dtype_is_object; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_obj,&__pyx_n_s_flags,&__pyx_n_s_dtype_is_object,0}; PyObject* values[3] = {0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_obj)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_flags)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, 1); __PYX_ERR(1, 344, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_dtype_is_object); if (value) { values[2] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) __PYX_ERR(1, 344, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_obj = values[0]; __pyx_v_flags = __Pyx_PyInt_As_int(values[1]); if (unlikely((__pyx_v_flags == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 344, __pyx_L3_error) if (values[2]) { __pyx_v_dtype_is_object = 
__Pyx_PyObject_IsTrue(values[2]); if (unlikely((__pyx_v_dtype_is_object == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 344, __pyx_L3_error) } else { __pyx_v_dtype_is_object = ((int)0); } } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 344, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_obj, __pyx_v_flags, __pyx_v_dtype_is_object); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; __Pyx_RefNannySetupContext("__cinit__", 0); /* "View.MemoryView":345 * * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): * self.obj = obj # <<<<<<<<<<<<<< * self.flags = flags * if type(self) is memoryview or obj is not None: */ __Pyx_INCREF(__pyx_v_obj); __Pyx_GIVEREF(__pyx_v_obj); __Pyx_GOTREF(__pyx_v_self->obj); __Pyx_DECREF(__pyx_v_self->obj); __pyx_v_self->obj = __pyx_v_obj; /* "View.MemoryView":346 * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): * self.obj = obj * self.flags = flags # <<<<<<<<<<<<<< * if type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) */ __pyx_v_self->flags = __pyx_v_flags; /* "View.MemoryView":347 * self.obj = obj * self.flags = flags * if type(self) is memoryview or obj is not None: # <<<<<<<<<<<<<< * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: */ __pyx_t_2 = (((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))) == ((PyObject *)__pyx_memoryview_type)); __pyx_t_3 = (__pyx_t_2 != 0); if (!__pyx_t_3) { } else { __pyx_t_1 = __pyx_t_3; goto __pyx_L4_bool_binop_done; } __pyx_t_3 = (__pyx_v_obj != Py_None); __pyx_t_2 = (__pyx_t_3 != 0); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (__pyx_t_1) { /* "View.MemoryView":348 * self.flags = flags * if type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) # <<<<<<<<<<<<<< * if <PyObject *> self.view.obj == NULL: * (<__pyx_buffer *> &self.view).obj = Py_None */ __pyx_t_4 = __Pyx_GetBuffer(__pyx_v_obj, (&__pyx_v_self->view), __pyx_v_flags); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 348, __pyx_L1_error) /* "View.MemoryView":349 * if type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: # <<<<<<<<<<<<<< * (<__pyx_buffer *> &self.view).obj = Py_None * Py_INCREF(Py_None) */ __pyx_t_1 = ((((PyObject *)__pyx_v_self->view.obj) == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":350 * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: * (<__pyx_buffer *> &self.view).obj = Py_None # <<<<<<<<<<<<<< * Py_INCREF(Py_None) * */ ((Py_buffer *)(&__pyx_v_self->view))->obj = Py_None; /* "View.MemoryView":351 * if <PyObject *> self.view.obj == NULL: * (<__pyx_buffer *> &self.view).obj = Py_None * Py_INCREF(Py_None) # <<<<<<<<<<<<<< * * global 
__pyx_memoryview_thread_locks_used */ Py_INCREF(Py_None); /* "View.MemoryView":349 * if type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: # <<<<<<<<<<<<<< * (<__pyx_buffer *> &self.view).obj = Py_None * Py_INCREF(Py_None) */ } /* "View.MemoryView":347 * self.obj = obj * self.flags = flags * if type(self) is memoryview or obj is not None: # <<<<<<<<<<<<<< * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: */ } /* "View.MemoryView":354 * * global __pyx_memoryview_thread_locks_used * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: # <<<<<<<<<<<<<< * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 */ __pyx_t_1 = ((__pyx_memoryview_thread_locks_used < 8) != 0); if (__pyx_t_1) { /* "View.MemoryView":355 * global __pyx_memoryview_thread_locks_used * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks_used += 1 * if self.lock is NULL: */ __pyx_v_self->lock = (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]); /* "View.MemoryView":356 * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 # <<<<<<<<<<<<<< * if self.lock is NULL: * self.lock = PyThread_allocate_lock() */ __pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used + 1); /* "View.MemoryView":354 * * global __pyx_memoryview_thread_locks_used * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: # <<<<<<<<<<<<<< * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 */ } /* "View.MemoryView":357 * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 * if self.lock is NULL: # <<<<<<<<<<<<<< * self.lock = PyThread_allocate_lock() * if self.lock is NULL: */ __pyx_t_1 = ((__pyx_v_self->lock == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":358 * __pyx_memoryview_thread_locks_used += 1 * if self.lock is NULL: * self.lock = PyThread_allocate_lock() # <<<<<<<<<<<<<< * if self.lock is NULL: * raise MemoryError */ __pyx_v_self->lock = PyThread_allocate_lock(); /* "View.MemoryView":359 * if self.lock is NULL: * self.lock = PyThread_allocate_lock() * if self.lock is NULL: # <<<<<<<<<<<<<< * raise MemoryError * */ __pyx_t_1 = ((__pyx_v_self->lock == NULL) != 0); if (unlikely(__pyx_t_1)) { /* "View.MemoryView":360 * self.lock = PyThread_allocate_lock() * if self.lock is NULL: * raise MemoryError # <<<<<<<<<<<<<< * * if flags & PyBUF_FORMAT: */ PyErr_NoMemory(); __PYX_ERR(1, 360, __pyx_L1_error) /* "View.MemoryView":359 * if self.lock is NULL: * self.lock = PyThread_allocate_lock() * if self.lock is NULL: # <<<<<<<<<<<<<< * raise MemoryError * */ } /* "View.MemoryView":357 * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 * if self.lock is NULL: # <<<<<<<<<<<<<< * self.lock = PyThread_allocate_lock() * if self.lock is NULL: */ } /* "View.MemoryView":362 * raise MemoryError * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') * else: */ __pyx_t_1 = 
((__pyx_v_flags & PyBUF_FORMAT) != 0); if (__pyx_t_1) { /* "View.MemoryView":363 * * if flags & PyBUF_FORMAT: * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') # <<<<<<<<<<<<<< * else: * self.dtype_is_object = dtype_is_object */ __pyx_t_2 = (((__pyx_v_self->view.format[0]) == 'O') != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L11_bool_binop_done; } __pyx_t_2 = (((__pyx_v_self->view.format[1]) == '\x00') != 0); __pyx_t_1 = __pyx_t_2; __pyx_L11_bool_binop_done:; __pyx_v_self->dtype_is_object = __pyx_t_1; /* "View.MemoryView":362 * raise MemoryError * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') * else: */ goto __pyx_L10; } /* "View.MemoryView":365 * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') * else: * self.dtype_is_object = dtype_is_object # <<<<<<<<<<<<<< * * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( */ /*else*/ { __pyx_v_self->dtype_is_object = __pyx_v_dtype_is_object; } __pyx_L10:; /* "View.MemoryView":367 * self.dtype_is_object = dtype_is_object * * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( # <<<<<<<<<<<<<< * <void *> &self.acquisition_count[0], sizeof(__pyx_atomic_int)) * self.typeinfo = NULL */ __pyx_v_self->acquisition_count_aligned_p = ((__pyx_atomic_int *)__pyx_align_pointer(((void *)(&(__pyx_v_self->acquisition_count[0]))), (sizeof(__pyx_atomic_int)))); /* "View.MemoryView":369 * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( * <void *> &self.acquisition_count[0], sizeof(__pyx_atomic_int)) * self.typeinfo = NULL # <<<<<<<<<<<<<< * * def __dealloc__(memoryview self): */ __pyx_v_self->typeinfo = NULL; /* "View.MemoryView":344 * cdef __Pyx_TypeInfo *typeinfo * * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<< * self.obj = obj * self.flags = flags */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":371 * self.typeinfo = NULL * * def __dealloc__(memoryview self): # <<<<<<<<<<<<<< * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) */ /* Python wrapper */ static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self); /*proto*/ static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self) { int __pyx_v_i; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; int __pyx_t_5; PyThread_type_lock __pyx_t_6; PyThread_type_lock __pyx_t_7; __Pyx_RefNannySetupContext("__dealloc__", 0); /* "View.MemoryView":372 * * def __dealloc__(memoryview self): * if self.obj is not None: # <<<<<<<<<<<<<< * __Pyx_ReleaseBuffer(&self.view) * */ __pyx_t_1 = (__pyx_v_self->obj != Py_None); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":373 * def __dealloc__(memoryview self): * if self.obj is not None: * 
__Pyx_ReleaseBuffer(&self.view) # <<<<<<<<<<<<<< * * cdef int i */ __Pyx_ReleaseBuffer((&__pyx_v_self->view)); /* "View.MemoryView":372 * * def __dealloc__(memoryview self): * if self.obj is not None: # <<<<<<<<<<<<<< * __Pyx_ReleaseBuffer(&self.view) * */ } /* "View.MemoryView":377 * cdef int i * global __pyx_memoryview_thread_locks_used * if self.lock != NULL: # <<<<<<<<<<<<<< * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: */ __pyx_t_2 = ((__pyx_v_self->lock != NULL) != 0); if (__pyx_t_2) { /* "View.MemoryView":378 * global __pyx_memoryview_thread_locks_used * if self.lock != NULL: * for i in range(__pyx_memoryview_thread_locks_used): # <<<<<<<<<<<<<< * if __pyx_memoryview_thread_locks[i] is self.lock: * __pyx_memoryview_thread_locks_used -= 1 */ __pyx_t_3 = __pyx_memoryview_thread_locks_used; __pyx_t_4 = __pyx_t_3; for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) { __pyx_v_i = __pyx_t_5; /* "View.MemoryView":379 * if self.lock != NULL: * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: */ __pyx_t_2 = (((__pyx_memoryview_thread_locks[__pyx_v_i]) == __pyx_v_self->lock) != 0); if (__pyx_t_2) { /* "View.MemoryView":380 * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: * __pyx_memoryview_thread_locks_used -= 1 # <<<<<<<<<<<<<< * if i != __pyx_memoryview_thread_locks_used: * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( */ __pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used - 1); /* "View.MemoryView":381 * if __pyx_memoryview_thread_locks[i] is self.lock: * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) */ __pyx_t_2 = ((__pyx_v_i != __pyx_memoryview_thread_locks_used) != 0); if (__pyx_t_2) { /* "View.MemoryView":383 * if i != __pyx_memoryview_thread_locks_used: * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) # <<<<<<<<<<<<<< * break * else: */ __pyx_t_6 = (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]); __pyx_t_7 = (__pyx_memoryview_thread_locks[__pyx_v_i]); /* "View.MemoryView":382 * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) * break */ (__pyx_memoryview_thread_locks[__pyx_v_i]) = __pyx_t_6; (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]) = __pyx_t_7; /* "View.MemoryView":381 * if __pyx_memoryview_thread_locks[i] is self.lock: * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], 
__pyx_memoryview_thread_locks[i]) */ } /* "View.MemoryView":384 * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) * break # <<<<<<<<<<<<<< * else: * PyThread_free_lock(self.lock) */ goto __pyx_L6_break; /* "View.MemoryView":379 * if self.lock != NULL: * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: */ } } /*else*/ { /* "View.MemoryView":386 * break * else: * PyThread_free_lock(self.lock) # <<<<<<<<<<<<<< * * cdef char *get_item_pointer(memoryview self, object index) except NULL: */ PyThread_free_lock(__pyx_v_self->lock); } __pyx_L6_break:; /* "View.MemoryView":377 * cdef int i * global __pyx_memoryview_thread_locks_used * if self.lock != NULL: # <<<<<<<<<<<<<< * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: */ } /* "View.MemoryView":371 * self.typeinfo = NULL * * def __dealloc__(memoryview self): # <<<<<<<<<<<<<< * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":388 * PyThread_free_lock(self.lock) * * cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<< * cdef Py_ssize_t dim * cdef char *itemp = <char *> self.view.buf */ static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) { Py_ssize_t __pyx_v_dim; char *__pyx_v_itemp; PyObject *__pyx_v_idx = NULL; char *__pyx_r; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; PyObject *__pyx_t_2 = NULL; Py_ssize_t __pyx_t_3; PyObject *(*__pyx_t_4)(PyObject *); PyObject *__pyx_t_5 = NULL; Py_ssize_t __pyx_t_6; char *__pyx_t_7; __Pyx_RefNannySetupContext("get_item_pointer", 0); /* "View.MemoryView":390 * cdef char *get_item_pointer(memoryview self, object index) except NULL: * cdef Py_ssize_t dim * cdef char *itemp = <char *> self.view.buf # <<<<<<<<<<<<<< * * for dim, idx in enumerate(index): */ __pyx_v_itemp = ((char *)__pyx_v_self->view.buf); /* "View.MemoryView":392 * cdef char *itemp = <char *> self.view.buf * * for dim, idx in enumerate(index): # <<<<<<<<<<<<<< * itemp = pybuffer_index(&self.view, itemp, idx, dim) * */ __pyx_t_1 = 0; if (likely(PyList_CheckExact(__pyx_v_index)) || PyTuple_CheckExact(__pyx_v_index)) { __pyx_t_2 = __pyx_v_index; __Pyx_INCREF(__pyx_t_2); __pyx_t_3 = 0; __pyx_t_4 = NULL; } else { __pyx_t_3 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_v_index); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 392, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = Py_TYPE(__pyx_t_2)->tp_iternext; if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 392, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_4)) { if (likely(PyList_CheckExact(__pyx_t_2))) { if (__pyx_t_3 >= PyList_GET_SIZE(__pyx_t_2)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_5 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(1, 392, __pyx_L1_error) #else __pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 392, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); #endif } else { if (__pyx_t_3 >= PyTuple_GET_SIZE(__pyx_t_2)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_5 = 
PyTuple_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(1, 392, __pyx_L1_error) #else __pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 392, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); #endif } } else { __pyx_t_5 = __pyx_t_4(__pyx_t_2); if (unlikely(!__pyx_t_5)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(1, 392, __pyx_L1_error) } break; } __Pyx_GOTREF(__pyx_t_5); } __Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_5); __pyx_t_5 = 0; __pyx_v_dim = __pyx_t_1; __pyx_t_1 = (__pyx_t_1 + 1); /* "View.MemoryView":393 * * for dim, idx in enumerate(index): * itemp = pybuffer_index(&self.view, itemp, idx, dim) # <<<<<<<<<<<<<< * * return itemp */ __pyx_t_6 = __Pyx_PyIndex_AsSsize_t(__pyx_v_idx); if (unlikely((__pyx_t_6 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 393, __pyx_L1_error) __pyx_t_7 = __pyx_pybuffer_index((&__pyx_v_self->view), __pyx_v_itemp, __pyx_t_6, __pyx_v_dim); if (unlikely(__pyx_t_7 == ((char *)NULL))) __PYX_ERR(1, 393, __pyx_L1_error) __pyx_v_itemp = __pyx_t_7; /* "View.MemoryView":392 * cdef char *itemp = <char *> self.view.buf * * for dim, idx in enumerate(index): # <<<<<<<<<<<<<< * itemp = pybuffer_index(&self.view, itemp, idx, dim) * */ } __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":395 * itemp = pybuffer_index(&self.view, itemp, idx, dim) * * return itemp # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_itemp; goto __pyx_L0; /* "View.MemoryView":388 * PyThread_free_lock(self.lock) * * cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<< * cdef Py_ssize_t dim * cdef char *itemp = <char *> self.view.buf */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.memoryview.get_item_pointer", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_idx); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":398 * * * def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<< * if index is Ellipsis: * return self */ /* Python wrapper */ static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index); /*proto*/ static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) { PyObject *__pyx_v_have_slices = NULL; PyObject *__pyx_v_indices = NULL; char *__pyx_v_itemp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; char *__pyx_t_6; __Pyx_RefNannySetupContext("__getitem__", 0); /* "View.MemoryView":399 * * def __getitem__(memoryview self, object index): * if index is Ellipsis: # <<<<<<<<<<<<<< * return self * */ __pyx_t_1 = (__pyx_v_index == __pyx_builtin_Ellipsis); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* 
"View.MemoryView":400 * def __getitem__(memoryview self, object index): * if index is Ellipsis: * return self # <<<<<<<<<<<<<< * * have_slices, indices = _unellipsify(index, self.view.ndim) */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_self)); __pyx_r = ((PyObject *)__pyx_v_self); goto __pyx_L0; /* "View.MemoryView":399 * * def __getitem__(memoryview self, object index): * if index is Ellipsis: # <<<<<<<<<<<<<< * return self * */ } /* "View.MemoryView":402 * return self * * have_slices, indices = _unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<< * * cdef char *itemp */ __pyx_t_3 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 402, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (likely(__pyx_t_3 != Py_None)) { PyObject* sequence = __pyx_t_3; Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); __PYX_ERR(1, 402, __pyx_L1_error) } #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_4 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_5 = PyTuple_GET_ITEM(sequence, 1); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(__pyx_t_5); #else __pyx_t_4 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 402, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 402, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); #endif __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } else { __Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 402, __pyx_L1_error) } __pyx_v_have_slices = __pyx_t_4; __pyx_t_4 = 0; __pyx_v_indices = __pyx_t_5; __pyx_t_5 = 0; /* "View.MemoryView":405 * * cdef char *itemp * if have_slices: # <<<<<<<<<<<<<< * return memview_slice(self, indices) * else: */ __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(1, 405, __pyx_L1_error) if (__pyx_t_2) { /* "View.MemoryView":406 * cdef char *itemp * if have_slices: * return memview_slice(self, indices) # <<<<<<<<<<<<<< * else: * itemp = self.get_item_pointer(indices) */ __Pyx_XDECREF(__pyx_r); __pyx_t_3 = ((PyObject *)__pyx_memview_slice(__pyx_v_self, __pyx_v_indices)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 406, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":405 * * cdef char *itemp * if have_slices: # <<<<<<<<<<<<<< * return memview_slice(self, indices) * else: */ } /* "View.MemoryView":408 * return memview_slice(self, indices) * else: * itemp = self.get_item_pointer(indices) # <<<<<<<<<<<<<< * return self.convert_item_to_object(itemp) * */ /*else*/ { __pyx_t_6 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_indices); if (unlikely(__pyx_t_6 == ((char *)NULL))) __PYX_ERR(1, 408, __pyx_L1_error) __pyx_v_itemp = __pyx_t_6; /* "View.MemoryView":409 * else: * itemp = self.get_item_pointer(indices) * return self.convert_item_to_object(itemp) # <<<<<<<<<<<<<< * * def __setitem__(memoryview self, object index, object value): */ __Pyx_XDECREF(__pyx_r); __pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->convert_item_to_object(__pyx_v_self, __pyx_v_itemp); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 409, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; } /* "View.MemoryView":398 * * * def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<< * if index is Ellipsis: * 
return self */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.memoryview.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_have_slices); __Pyx_XDECREF(__pyx_v_indices); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":411 * return self.convert_item_to_object(itemp) * * def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<< * if self.view.readonly: * raise TypeError("Cannot assign to read-only memoryview") */ /* Python wrapper */ static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /*proto*/ static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index), ((PyObject *)__pyx_v_value)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { PyObject *__pyx_v_have_slices = NULL; PyObject *__pyx_v_obj = NULL; int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; __Pyx_RefNannySetupContext("__setitem__", 0); __Pyx_INCREF(__pyx_v_index); /* "View.MemoryView":412 * * def __setitem__(memoryview self, object index, object value): * if self.view.readonly: # <<<<<<<<<<<<<< * raise TypeError("Cannot assign to read-only memoryview") * */ __pyx_t_1 = (__pyx_v_self->view.readonly != 0); if (unlikely(__pyx_t_1)) { /* "View.MemoryView":413 * def __setitem__(memoryview self, object index, object value): * if self.view.readonly: * raise TypeError("Cannot assign to read-only memoryview") # <<<<<<<<<<<<<< * * have_slices, index = _unellipsify(index, self.view.ndim) */ __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__13, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 413, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __PYX_ERR(1, 413, __pyx_L1_error) /* "View.MemoryView":412 * * def __setitem__(memoryview self, object index, object value): * if self.view.readonly: # <<<<<<<<<<<<<< * raise TypeError("Cannot assign to read-only memoryview") * */ } /* "View.MemoryView":415 * raise TypeError("Cannot assign to read-only memoryview") * * have_slices, index = _unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<< * * if have_slices: */ __pyx_t_2 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 415, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (likely(__pyx_t_2 != Py_None)) { PyObject* sequence = __pyx_t_2; Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); __PYX_ERR(1, 415, __pyx_L1_error) } #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_4 = PyTuple_GET_ITEM(sequence, 1); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(__pyx_t_4); 
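  /* Note (descriptive comment, not generated code): the branch above borrows
     tuple items via PyTuple_GET_ITEM and takes explicit INCREFs as a fast
     path; the fallback branch below uses PySequence_ITEM, which returns new
     references, for builds where the borrowed-reference macros are unsafe
     or must be avoided. */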
#else __pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 415, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 415, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); #endif __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } else { __Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 415, __pyx_L1_error) } __pyx_v_have_slices = __pyx_t_3; __pyx_t_3 = 0; __Pyx_DECREF_SET(__pyx_v_index, __pyx_t_4); __pyx_t_4 = 0; /* "View.MemoryView":417 * have_slices, index = _unellipsify(index, self.view.ndim) * * if have_slices: # <<<<<<<<<<<<<< * obj = self.is_slice(value) * if obj: */ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 417, __pyx_L1_error) if (__pyx_t_1) { /* "View.MemoryView":418 * * if have_slices: * obj = self.is_slice(value) # <<<<<<<<<<<<<< * if obj: * self.setitem_slice_assignment(self[index], obj) */ __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->is_slice(__pyx_v_self, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 418, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_v_obj = __pyx_t_2; __pyx_t_2 = 0; /* "View.MemoryView":419 * if have_slices: * obj = self.is_slice(value) * if obj: # <<<<<<<<<<<<<< * self.setitem_slice_assignment(self[index], obj) * else: */ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_obj); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 419, __pyx_L1_error) if (__pyx_t_1) { /* "View.MemoryView":420 * obj = self.is_slice(value) * if obj: * self.setitem_slice_assignment(self[index], obj) # <<<<<<<<<<<<<< * else: * self.setitem_slice_assign_scalar(self[index], value) */ __pyx_t_2 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 420, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assignment(__pyx_v_self, __pyx_t_2, __pyx_v_obj); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 420, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; /* "View.MemoryView":419 * if have_slices: * obj = self.is_slice(value) * if obj: # <<<<<<<<<<<<<< * self.setitem_slice_assignment(self[index], obj) * else: */ goto __pyx_L5; } /* "View.MemoryView":422 * self.setitem_slice_assignment(self[index], obj) * else: * self.setitem_slice_assign_scalar(self[index], value) # <<<<<<<<<<<<<< * else: * self.setitem_indexed(index, value) */ /*else*/ { __pyx_t_4 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 422, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); if (!(likely(((__pyx_t_4) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_4, __pyx_memoryview_type))))) __PYX_ERR(1, 422, __pyx_L1_error) __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assign_scalar(__pyx_v_self, ((struct __pyx_memoryview_obj *)__pyx_t_4), __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 422, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } __pyx_L5:; /* "View.MemoryView":417 * have_slices, index = _unellipsify(index, self.view.ndim) * * if have_slices: # <<<<<<<<<<<<<< * obj = self.is_slice(value) * if obj: */ goto __pyx_L4; } /* "View.MemoryView":424 * self.setitem_slice_assign_scalar(self[index], value) * else: * self.setitem_indexed(index, value) # <<<<<<<<<<<<<< * * cdef is_slice(self, obj): */ 
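  /* Non-slice assignment path (a summary of the Cython source shown above,
     not new behavior): when _unellipsify reports no slices, the index
     addresses a single element, so __setitem__ falls through to
     setitem_indexed below, which resolves the element pointer and packs the
     value into the buffer. */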
/*else*/ { __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_indexed(__pyx_v_self, __pyx_v_index, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 424, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } __pyx_L4:; /* "View.MemoryView":411 * return self.convert_item_to_object(itemp) * * def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<< * if self.view.readonly: * raise TypeError("Cannot assign to read-only memoryview") */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("View.MemoryView.memoryview.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_XDECREF(__pyx_v_have_slices); __Pyx_XDECREF(__pyx_v_obj); __Pyx_XDECREF(__pyx_v_index); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":426 * self.setitem_indexed(index, value) * * cdef is_slice(self, obj): # <<<<<<<<<<<<<< * if not isinstance(obj, memoryview): * try: */ static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; int __pyx_t_9; __Pyx_RefNannySetupContext("is_slice", 0); __Pyx_INCREF(__pyx_v_obj); /* "View.MemoryView":427 * * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): # <<<<<<<<<<<<<< * try: * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS, */ __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_obj, __pyx_memoryview_type); __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":428 * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): * try: # <<<<<<<<<<<<<< * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_3, &__pyx_t_4, &__pyx_t_5); __Pyx_XGOTREF(__pyx_t_3); __Pyx_XGOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_t_5); /*try:*/ { /* "View.MemoryView":429 * if not isinstance(obj, memoryview): * try: * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<< * self.dtype_is_object) * except TypeError: */ __pyx_t_6 = __Pyx_PyInt_From_int((__pyx_v_self->flags | PyBUF_ANY_CONTIGUOUS)); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 429, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_6); /* "View.MemoryView":430 * try: * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) # <<<<<<<<<<<<<< * except TypeError: * return None */ __pyx_t_7 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 430, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_7); /* "View.MemoryView":429 * if not isinstance(obj, memoryview): * try: * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<< * self.dtype_is_object) * except TypeError: */ __pyx_t_8 = PyTuple_New(3); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 429, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_INCREF(__pyx_v_obj); __Pyx_GIVEREF(__pyx_v_obj); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_v_obj); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_8, 1, __pyx_t_6); __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_8, 2, __pyx_t_7); __pyx_t_6 = 0; __pyx_t_7 = 0; __pyx_t_7 = __Pyx_PyObject_Call(((PyObject 
*)__pyx_memoryview_type), __pyx_t_8, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 429, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_DECREF_SET(__pyx_v_obj, __pyx_t_7); __pyx_t_7 = 0; /* "View.MemoryView":428 * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): * try: # <<<<<<<<<<<<<< * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) */ } __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; goto __pyx_L9_try_end; __pyx_L4_error:; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; /* "View.MemoryView":431 * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) * except TypeError: # <<<<<<<<<<<<<< * return None * */ __pyx_t_9 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_TypeError); if (__pyx_t_9) { __Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_7, &__pyx_t_8, &__pyx_t_6) < 0) __PYX_ERR(1, 431, __pyx_L6_except_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_GOTREF(__pyx_t_8); __Pyx_GOTREF(__pyx_t_6); /* "View.MemoryView":432 * self.dtype_is_object) * except TypeError: * return None # <<<<<<<<<<<<<< * * return obj */ __Pyx_XDECREF(__pyx_r); __pyx_r = Py_None; __Pyx_INCREF(Py_None); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; goto __pyx_L7_except_return; } goto __pyx_L6_except_error; __pyx_L6_except_error:; /* "View.MemoryView":428 * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): * try: # <<<<<<<<<<<<<< * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) */ __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); goto __pyx_L1_error; __pyx_L7_except_return:; __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); goto __pyx_L0; __pyx_L9_try_end:; } /* "View.MemoryView":427 * * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): # <<<<<<<<<<<<<< * try: * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS, */ } /* "View.MemoryView":434 * return None * * return obj # <<<<<<<<<<<<<< * * cdef setitem_slice_assignment(self, dst, src): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_obj); __pyx_r = __pyx_v_obj; goto __pyx_L0; /* "View.MemoryView":426 * self.setitem_indexed(index, value) * * cdef is_slice(self, obj): # <<<<<<<<<<<<<< * if not isinstance(obj, memoryview): * try: */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_obj); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":436 * return obj * * cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice dst_slice * cdef __Pyx_memviewslice src_slice */ static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src) { __Pyx_memviewslice __pyx_v_dst_slice; __Pyx_memviewslice __pyx_v_src_slice; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject 
*__pyx_t_1 = NULL; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; __Pyx_RefNannySetupContext("setitem_slice_assignment", 0); /* "View.MemoryView":440 * cdef __Pyx_memviewslice src_slice * * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], # <<<<<<<<<<<<<< * get_slice_from_memview(dst, &dst_slice)[0], * src.ndim, dst.ndim, self.dtype_is_object) */ if (!(likely(((__pyx_v_src) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_src, __pyx_memoryview_type))))) __PYX_ERR(1, 440, __pyx_L1_error) /* "View.MemoryView":441 * * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], * get_slice_from_memview(dst, &dst_slice)[0], # <<<<<<<<<<<<<< * src.ndim, dst.ndim, self.dtype_is_object) * */ if (!(likely(((__pyx_v_dst) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_dst, __pyx_memoryview_type))))) __PYX_ERR(1, 441, __pyx_L1_error) /* "View.MemoryView":442 * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], * get_slice_from_memview(dst, &dst_slice)[0], * src.ndim, dst.ndim, self.dtype_is_object) # <<<<<<<<<<<<<< * * cdef setitem_slice_assign_scalar(self, memoryview dst, value): */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_src, __pyx_n_s_ndim); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 442, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyInt_As_int(__pyx_t_1); if (unlikely((__pyx_t_2 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 442, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_dst, __pyx_n_s_ndim); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 442, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = __Pyx_PyInt_As_int(__pyx_t_1); if (unlikely((__pyx_t_3 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 442, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":440 * cdef __Pyx_memviewslice src_slice * * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], # <<<<<<<<<<<<<< * get_slice_from_memview(dst, &dst_slice)[0], * src.ndim, dst.ndim, self.dtype_is_object) */ __pyx_t_4 = __pyx_memoryview_copy_contents((__pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_src), (&__pyx_v_src_slice))[0]), (__pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_dst), (&__pyx_v_dst_slice))[0]), __pyx_t_2, __pyx_t_3, __pyx_v_self->dtype_is_object); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 440, __pyx_L1_error) /* "View.MemoryView":436 * return obj * * cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice dst_slice * cdef __Pyx_memviewslice src_slice */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assignment", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":444 * src.ndim, dst.ndim, self.dtype_is_object) * * cdef setitem_slice_assign_scalar(self, memoryview dst, value): # <<<<<<<<<<<<<< * cdef int array[128] * cdef void *tmp = NULL */ static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value) { int __pyx_v_array[0x80]; void *__pyx_v_tmp; void *__pyx_v_item; __Pyx_memviewslice *__pyx_v_dst_slice; __Pyx_memviewslice __pyx_v_tmp_slice; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject 
*__pyx_t_2 = NULL; int __pyx_t_3; int __pyx_t_4; char const *__pyx_t_5; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; PyObject *__pyx_t_9 = NULL; PyObject *__pyx_t_10 = NULL; PyObject *__pyx_t_11 = NULL; __Pyx_RefNannySetupContext("setitem_slice_assign_scalar", 0); /* "View.MemoryView":446 * cdef setitem_slice_assign_scalar(self, memoryview dst, value): * cdef int array[128] * cdef void *tmp = NULL # <<<<<<<<<<<<<< * cdef void *item * */ __pyx_v_tmp = NULL; /* "View.MemoryView":451 * cdef __Pyx_memviewslice *dst_slice * cdef __Pyx_memviewslice tmp_slice * dst_slice = get_slice_from_memview(dst, &tmp_slice) # <<<<<<<<<<<<<< * * if <size_t>self.view.itemsize > sizeof(array): */ __pyx_v_dst_slice = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_dst, (&__pyx_v_tmp_slice)); /* "View.MemoryView":453 * dst_slice = get_slice_from_memview(dst, &tmp_slice) * * if <size_t>self.view.itemsize > sizeof(array): # <<<<<<<<<<<<<< * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: */ __pyx_t_1 = ((((size_t)__pyx_v_self->view.itemsize) > (sizeof(__pyx_v_array))) != 0); if (__pyx_t_1) { /* "View.MemoryView":454 * * if <size_t>self.view.itemsize > sizeof(array): * tmp = PyMem_Malloc(self.view.itemsize) # <<<<<<<<<<<<<< * if tmp == NULL: * raise MemoryError */ __pyx_v_tmp = PyMem_Malloc(__pyx_v_self->view.itemsize); /* "View.MemoryView":455 * if <size_t>self.view.itemsize > sizeof(array): * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: # <<<<<<<<<<<<<< * raise MemoryError * item = tmp */ __pyx_t_1 = ((__pyx_v_tmp == NULL) != 0); if (unlikely(__pyx_t_1)) { /* "View.MemoryView":456 * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: * raise MemoryError # <<<<<<<<<<<<<< * item = tmp * else: */ PyErr_NoMemory(); __PYX_ERR(1, 456, __pyx_L1_error) /* "View.MemoryView":455 * if <size_t>self.view.itemsize > sizeof(array): * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: # <<<<<<<<<<<<<< * raise MemoryError * item = tmp */ } /* "View.MemoryView":457 * if tmp == NULL: * raise MemoryError * item = tmp # <<<<<<<<<<<<<< * else: * item = <void *> array */ __pyx_v_item = __pyx_v_tmp; /* "View.MemoryView":453 * dst_slice = get_slice_from_memview(dst, &tmp_slice) * * if <size_t>self.view.itemsize > sizeof(array): # <<<<<<<<<<<<<< * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: */ goto __pyx_L3; } /* "View.MemoryView":459 * item = tmp * else: * item = <void *> array # <<<<<<<<<<<<<< * * try: */ /*else*/ { __pyx_v_item = ((void *)__pyx_v_array); } __pyx_L3:; /* "View.MemoryView":461 * item = <void *> array * * try: # <<<<<<<<<<<<<< * if self.dtype_is_object: * (<PyObject **> item)[0] = <PyObject *> value */ /*try:*/ { /* "View.MemoryView":462 * * try: * if self.dtype_is_object: # <<<<<<<<<<<<<< * (<PyObject **> item)[0] = <PyObject *> value * else: */ __pyx_t_1 = (__pyx_v_self->dtype_is_object != 0); if (__pyx_t_1) { /* "View.MemoryView":463 * try: * if self.dtype_is_object: * (<PyObject **> item)[0] = <PyObject *> value # <<<<<<<<<<<<<< * else: * self.assign_item_from_object(<char *> item, value) */ (((PyObject **)__pyx_v_item)[0]) = ((PyObject *)__pyx_v_value); /* "View.MemoryView":462 * * try: * if self.dtype_is_object: # <<<<<<<<<<<<<< * (<PyObject **> item)[0] = <PyObject *> value * else: */ goto __pyx_L8; } /* "View.MemoryView":465 * (<PyObject **> item)[0] = <PyObject *> value * else: * self.assign_item_from_object(<char *> item, value) # <<<<<<<<<<<<<< * * */ /*else*/ { __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview 
*)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, ((char *)__pyx_v_item), __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 465, __pyx_L6_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } __pyx_L8:; /* "View.MemoryView":469 * * * if self.view.suboffsets != NULL: # <<<<<<<<<<<<<< * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, */ __pyx_t_1 = ((__pyx_v_self->view.suboffsets != NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":470 * * if self.view.suboffsets != NULL: * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) # <<<<<<<<<<<<<< * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, * item, self.dtype_is_object) */ __pyx_t_2 = assert_direct_dimensions(__pyx_v_self->view.suboffsets, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 470, __pyx_L6_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":469 * * * if self.view.suboffsets != NULL: # <<<<<<<<<<<<<< * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, */ } /* "View.MemoryView":471 * if self.view.suboffsets != NULL: * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, # <<<<<<<<<<<<<< * item, self.dtype_is_object) * finally: */ __pyx_memoryview_slice_assign_scalar(__pyx_v_dst_slice, __pyx_v_dst->view.ndim, __pyx_v_self->view.itemsize, __pyx_v_item, __pyx_v_self->dtype_is_object); } /* "View.MemoryView":474 * item, self.dtype_is_object) * finally: * PyMem_Free(tmp) # <<<<<<<<<<<<<< * * cdef setitem_indexed(self, index, value): */ /*finally:*/ { /*normal exit:*/{ PyMem_Free(__pyx_v_tmp); goto __pyx_L7; } __pyx_L6_error:; /*exception exit:*/{ __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __pyx_t_6 = 0; __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; if (PY_MAJOR_VERSION >= 3) __Pyx_ExceptionSwap(&__pyx_t_9, &__pyx_t_10, &__pyx_t_11); if ((PY_MAJOR_VERSION < 3) || unlikely(__Pyx_GetException(&__pyx_t_6, &__pyx_t_7, &__pyx_t_8) < 0)) __Pyx_ErrFetch(&__pyx_t_6, &__pyx_t_7, &__pyx_t_8); __Pyx_XGOTREF(__pyx_t_6); __Pyx_XGOTREF(__pyx_t_7); __Pyx_XGOTREF(__pyx_t_8); __Pyx_XGOTREF(__pyx_t_9); __Pyx_XGOTREF(__pyx_t_10); __Pyx_XGOTREF(__pyx_t_11); __pyx_t_3 = __pyx_lineno; __pyx_t_4 = __pyx_clineno; __pyx_t_5 = __pyx_filename; { PyMem_Free(__pyx_v_tmp); } if (PY_MAJOR_VERSION >= 3) { __Pyx_XGIVEREF(__pyx_t_9); __Pyx_XGIVEREF(__pyx_t_10); __Pyx_XGIVEREF(__pyx_t_11); __Pyx_ExceptionReset(__pyx_t_9, __pyx_t_10, __pyx_t_11); } __Pyx_XGIVEREF(__pyx_t_6); __Pyx_XGIVEREF(__pyx_t_7); __Pyx_XGIVEREF(__pyx_t_8); __Pyx_ErrRestore(__pyx_t_6, __pyx_t_7, __pyx_t_8); __pyx_t_6 = 0; __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __pyx_lineno = __pyx_t_3; __pyx_clineno = __pyx_t_4; __pyx_filename = __pyx_t_5; goto __pyx_L1_error; } __pyx_L7:; } /* "View.MemoryView":444 * src.ndim, dst.ndim, self.dtype_is_object) * * cdef setitem_slice_assign_scalar(self, memoryview dst, value): # <<<<<<<<<<<<<< * cdef int array[128] * cdef void *tmp = NULL */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assign_scalar", __pyx_clineno, __pyx_lineno, __pyx_filename); 
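  /* Error exit: the finally clause above has already run PyMem_Free(tmp) and
     restored the pending exception state; returning NULL (0) propagates the
     error to the caller. */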
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "View.MemoryView":476
 * PyMem_Free(tmp)
 *
 * cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<<
 * cdef char *itemp = self.get_item_pointer(index)
 * self.assign_item_from_object(itemp, value)
 */

static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) {
  char *__pyx_v_itemp;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  char *__pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  __Pyx_RefNannySetupContext("setitem_indexed", 0);

  /* "View.MemoryView":477
 *
 * cdef setitem_indexed(self, index, value):
 * cdef char *itemp = self.get_item_pointer(index) # <<<<<<<<<<<<<<
 * self.assign_item_from_object(itemp, value)
 *
 */
  __pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_index); if (unlikely(__pyx_t_1 == ((char *)NULL))) __PYX_ERR(1, 477, __pyx_L1_error)
  __pyx_v_itemp = __pyx_t_1;

  /* "View.MemoryView":478
 * cdef setitem_indexed(self, index, value):
 * cdef char *itemp = self.get_item_pointer(index)
 * self.assign_item_from_object(itemp, value) # <<<<<<<<<<<<<<
 *
 * cdef convert_item_to_object(self, char *itemp):
 */
  __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 478, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;

  /* "View.MemoryView":476
 * PyMem_Free(tmp)
 *
 * cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<<
 * cdef char *itemp = self.get_item_pointer(index)
 * self.assign_item_from_object(itemp, value)
 */

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_indexed", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "View.MemoryView":480
 * self.assign_item_from_object(itemp, value)
 *
 * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<<
 * """Only used if instantiated manually by the user, or if Cython doesn't
 * know how to convert the type"""
 */

static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp) {
  PyObject *__pyx_v_struct = NULL;
  PyObject *__pyx_v_bytesitem = 0;
  PyObject *__pyx_v_result = NULL;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  int __pyx_t_8;
  PyObject *__pyx_t_9 = NULL;
  size_t __pyx_t_10;
  int __pyx_t_11;
  __Pyx_RefNannySetupContext("convert_item_to_object", 0);

  /* "View.MemoryView":483
 * """Only used if instantiated manually by the user, or if Cython doesn't
 * know how to convert the type"""
 * import struct # <<<<<<<<<<<<<<
 * cdef bytes bytesitem
 *
 */
  __pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 483, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_v_struct = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "View.MemoryView":486
 * cdef bytes bytesitem
 *
 * bytesitem = itemp[:self.view.itemsize] # <<<<<<<<<<<<<<
 * try:
 * result = struct.unpack(self.view.format, bytesitem)
 */
  __pyx_t_1 = __Pyx_PyBytes_FromStringAndSize(__pyx_v_itemp + 0,
__pyx_v_self->view.itemsize - 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 486, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_bytesitem = ((PyObject*)__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":487 * * bytesitem = itemp[:self.view.itemsize] * try: # <<<<<<<<<<<<<< * result = struct.unpack(self.view.format, bytesitem) * except struct.error: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_2, &__pyx_t_3, &__pyx_t_4); __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_3); __Pyx_XGOTREF(__pyx_t_4); /*try:*/ { /* "View.MemoryView":488 * bytesitem = itemp[:self.view.itemsize] * try: * result = struct.unpack(self.view.format, bytesitem) # <<<<<<<<<<<<<< * except struct.error: * raise ValueError("Unable to convert item to object") */ __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_unpack); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 488, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 488, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_7 = NULL; __pyx_t_8 = 0; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) { __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_5); if (likely(__pyx_t_7)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); __Pyx_INCREF(__pyx_t_7); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_5, function); __pyx_t_8 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_t_6, __pyx_v_bytesitem}; __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 488, __pyx_L3_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_t_6, __pyx_v_bytesitem}; __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 488, __pyx_L3_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } else #endif { __pyx_t_9 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 488, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_9); if (__pyx_t_7) { __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_7); __pyx_t_7 = NULL; } __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_9, 0+__pyx_t_8, __pyx_t_6); __Pyx_INCREF(__pyx_v_bytesitem); __Pyx_GIVEREF(__pyx_v_bytesitem); PyTuple_SET_ITEM(__pyx_t_9, 1+__pyx_t_8, __pyx_v_bytesitem); __pyx_t_6 = 0; __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_9, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 488, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_result = __pyx_t_1; __pyx_t_1 = 0; /* "View.MemoryView":487 * * bytesitem = itemp[:self.view.itemsize] * try: # <<<<<<<<<<<<<< * result = struct.unpack(self.view.format, bytesitem) * except struct.error: */ } /* "View.MemoryView":492 * raise ValueError("Unable to convert item to object") * else: * if len(self.view.format) == 1: # <<<<<<<<<<<<<< * return result[0] * return result */ /*else:*/ { __pyx_t_10 = strlen(__pyx_v_self->view.format); __pyx_t_11 = ((__pyx_t_10 == 1) != 0); if (__pyx_t_11) { /* "View.MemoryView":493 * else: * if len(self.view.format) == 1: * return result[0] # <<<<<<<<<<<<<< * return result * */ __Pyx_XDECREF(__pyx_r); 
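      /* Single-character format: struct.unpack always returns a tuple, so
         for a one-element format string only result[0] is handed back to
         Python. */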
__pyx_t_1 = __Pyx_GetItemInt(__pyx_v_result, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 493, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L6_except_return; /* "View.MemoryView":492 * raise ValueError("Unable to convert item to object") * else: * if len(self.view.format) == 1: # <<<<<<<<<<<<<< * return result[0] * return result */ } /* "View.MemoryView":494 * if len(self.view.format) == 1: * return result[0] * return result # <<<<<<<<<<<<<< * * cdef assign_item_from_object(self, char *itemp, object value): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_result); __pyx_r = __pyx_v_result; goto __pyx_L6_except_return; } __pyx_L3_error:; __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":489 * try: * result = struct.unpack(self.view.format, bytesitem) * except struct.error: # <<<<<<<<<<<<<< * raise ValueError("Unable to convert item to object") * else: */ __Pyx_ErrFetch(&__pyx_t_1, &__pyx_t_5, &__pyx_t_9); __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_error); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 489, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_8 = __Pyx_PyErr_GivenExceptionMatches(__pyx_t_1, __pyx_t_6); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_ErrRestore(__pyx_t_1, __pyx_t_5, __pyx_t_9); __pyx_t_1 = 0; __pyx_t_5 = 0; __pyx_t_9 = 0; if (__pyx_t_8) { __Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_9, &__pyx_t_5, &__pyx_t_1) < 0) __PYX_ERR(1, 489, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_GOTREF(__pyx_t_5); __Pyx_GOTREF(__pyx_t_1); /* "View.MemoryView":490 * result = struct.unpack(self.view.format, bytesitem) * except struct.error: * raise ValueError("Unable to convert item to object") # <<<<<<<<<<<<<< * else: * if len(self.view.format) == 1: */ __pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__14, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 490, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_Raise(__pyx_t_6, 0, 0, 0); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __PYX_ERR(1, 490, __pyx_L5_except_error) } goto __pyx_L5_except_error; __pyx_L5_except_error:; /* "View.MemoryView":487 * * bytesitem = itemp[:self.view.itemsize] * try: # <<<<<<<<<<<<<< * result = struct.unpack(self.view.format, bytesitem) * except struct.error: */ __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); goto __pyx_L1_error; __pyx_L6_except_return:; __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); goto __pyx_L0; } /* "View.MemoryView":480 * self.assign_item_from_object(itemp, value) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_9); __Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_struct); 
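  /* (exit cleanup continues: the temporary bytes item and unpacked result
     are released below; __pyx_r already holds the return value, or NULL if
     an error was recorded above) */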
__Pyx_XDECREF(__pyx_v_bytesitem); __Pyx_XDECREF(__pyx_v_result); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":496 * return result * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) { PyObject *__pyx_v_struct = NULL; char __pyx_v_c; PyObject *__pyx_v_bytesvalue = 0; Py_ssize_t __pyx_v_i; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; int __pyx_t_7; PyObject *__pyx_t_8 = NULL; Py_ssize_t __pyx_t_9; PyObject *__pyx_t_10 = NULL; char *__pyx_t_11; char *__pyx_t_12; char *__pyx_t_13; char *__pyx_t_14; __Pyx_RefNannySetupContext("assign_item_from_object", 0); /* "View.MemoryView":499 * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" * import struct # <<<<<<<<<<<<<< * cdef char c * cdef bytes bytesvalue */ __pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 499, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_struct = __pyx_t_1; __pyx_t_1 = 0; /* "View.MemoryView":504 * cdef Py_ssize_t i * * if isinstance(value, tuple): # <<<<<<<<<<<<<< * bytesvalue = struct.pack(self.view.format, *value) * else: */ __pyx_t_2 = PyTuple_Check(__pyx_v_value); __pyx_t_3 = (__pyx_t_2 != 0); if (__pyx_t_3) { /* "View.MemoryView":505 * * if isinstance(value, tuple): * bytesvalue = struct.pack(self.view.format, *value) # <<<<<<<<<<<<<< * else: * bytesvalue = struct.pack(self.view.format, value) */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 505, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_4 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 505, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 505, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PySequence_Tuple(__pyx_v_value); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 505, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_6 = PyNumber_Add(__pyx_t_5, __pyx_t_4); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 505, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_6, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 505, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) __PYX_ERR(1, 505, __pyx_L1_error) __pyx_v_bytesvalue = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; /* "View.MemoryView":504 * cdef Py_ssize_t i * * if isinstance(value, tuple): # <<<<<<<<<<<<<< * bytesvalue = struct.pack(self.view.format, *value) * else: */ goto __pyx_L3; } /* "View.MemoryView":507 * bytesvalue = struct.pack(self.view.format, *value) * else: * bytesvalue = struct.pack(self.view.format, value) # <<<<<<<<<<<<<< * * 
for i, c in enumerate(bytesvalue):
 */
  /*else*/ {
    __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 507, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_6);
    __pyx_t_1 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 507, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_5 = NULL;
    __pyx_t_7 = 0;
    if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_6))) {
      __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_6);
      if (likely(__pyx_t_5)) {
        PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6);
        __Pyx_INCREF(__pyx_t_5);
        __Pyx_INCREF(function);
        __Pyx_DECREF_SET(__pyx_t_6, function);
        __pyx_t_7 = 1;
      }
    }
    #if CYTHON_FAST_PYCALL
    if (PyFunction_Check(__pyx_t_6)) {
      PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_1, __pyx_v_value};
      __pyx_t_4 = __Pyx_PyFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 507, __pyx_L1_error)
      __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
      __Pyx_GOTREF(__pyx_t_4);
      __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    } else
    #endif
    #if CYTHON_FAST_PYCCALL
    if (__Pyx_PyFastCFunction_Check(__pyx_t_6)) {
      PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_1, __pyx_v_value};
      __pyx_t_4 = __Pyx_PyCFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 507, __pyx_L1_error)
      __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
      __Pyx_GOTREF(__pyx_t_4);
      __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    } else
    #endif
    {
      __pyx_t_8 = PyTuple_New(2+__pyx_t_7); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 507, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_8);
      if (__pyx_t_5) {
        __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_5); __pyx_t_5 = NULL;
      }
      __Pyx_GIVEREF(__pyx_t_1);
      PyTuple_SET_ITEM(__pyx_t_8, 0+__pyx_t_7, __pyx_t_1);
      __Pyx_INCREF(__pyx_v_value);
      __Pyx_GIVEREF(__pyx_v_value);
      PyTuple_SET_ITEM(__pyx_t_8, 1+__pyx_t_7, __pyx_v_value);
      __pyx_t_1 = 0;
      __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_8, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 507, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_4);
      __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
    }
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) __PYX_ERR(1, 507, __pyx_L1_error)
    __pyx_v_bytesvalue = ((PyObject*)__pyx_t_4);
    __pyx_t_4 = 0;
  }
  __pyx_L3:;

  /* "View.MemoryView":509
 * bytesvalue = struct.pack(self.view.format, value)
 *
 * for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<<
 * itemp[i] = c
 *
 */
  __pyx_t_9 = 0;
  if (unlikely(__pyx_v_bytesvalue == Py_None)) {
    PyErr_SetString(PyExc_TypeError, "'NoneType' is not iterable");
    __PYX_ERR(1, 509, __pyx_L1_error)
  }
  __Pyx_INCREF(__pyx_v_bytesvalue);
  __pyx_t_10 = __pyx_v_bytesvalue;
  __pyx_t_12 = PyBytes_AS_STRING(__pyx_t_10);
  __pyx_t_13 = (__pyx_t_12 + PyBytes_GET_SIZE(__pyx_t_10));
  for (__pyx_t_14 = __pyx_t_12; __pyx_t_14 < __pyx_t_13; __pyx_t_14++) {
    __pyx_t_11 = __pyx_t_14;
    __pyx_v_c = (__pyx_t_11[0]);

    /* "View.MemoryView":510
 *
 * for i, c in enumerate(bytesvalue):
 * itemp[i] = c # <<<<<<<<<<<<<<
 *
 * @cname('getbuffer')
 */
    __pyx_v_i = __pyx_t_9;

    /* "View.MemoryView":509
 * bytesvalue = struct.pack(self.view.format, value)
 *
 * for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<<
 * itemp[i] = c
 *
 */
    __pyx_t_9 = (__pyx_t_9 + 1);

    /* "View.MemoryView":510
 *
 * for i, c in enumerate(bytesvalue):
 * itemp[i] = c # <<<<<<<<<<<<<<
 *
 * @cname('getbuffer')
 */
    (__pyx_v_itemp[__pyx_v_i]) = __pyx_v_c;
  }
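  /* The loop above is the generated form of
     "for i, c in enumerate(bytesvalue): itemp[i] = c": it copies every byte
     of the struct.pack() result into the destination item buffer. */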
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; /* "View.MemoryView":496 * return result * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_8); __Pyx_XDECREF(__pyx_t_10); __Pyx_AddTraceback("View.MemoryView.memoryview.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_struct); __Pyx_XDECREF(__pyx_v_bytesvalue); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":513 * * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< * if flags & PyBUF_WRITABLE and self.view.readonly: * raise ValueError("Cannot create writable memory view from read-only memoryview") */ /* Python wrapper */ static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; Py_ssize_t *__pyx_t_4; char *__pyx_t_5; void *__pyx_t_6; int __pyx_t_7; Py_ssize_t __pyx_t_8; if (__pyx_v_info == NULL) { PyErr_SetString(PyExc_BufferError, "PyObject_GetBuffer: view==NULL argument is obsolete"); return -1; } __Pyx_RefNannySetupContext("__getbuffer__", 0); __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); __Pyx_GIVEREF(__pyx_v_info->obj); /* "View.MemoryView":514 * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): * if flags & PyBUF_WRITABLE and self.view.readonly: # <<<<<<<<<<<<<< * raise ValueError("Cannot create writable memory view from read-only memoryview") * */ __pyx_t_2 = ((__pyx_v_flags & PyBUF_WRITABLE) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L4_bool_binop_done; } __pyx_t_2 = (__pyx_v_self->view.readonly != 0); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (unlikely(__pyx_t_1)) { /* "View.MemoryView":515 * def __getbuffer__(self, Py_buffer *info, int flags): * if flags & PyBUF_WRITABLE and self.view.readonly: * raise ValueError("Cannot create writable memory view from read-only memoryview") # <<<<<<<<<<<<<< * * if flags & PyBUF_STRIDES: */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__15, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 515, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 515, __pyx_L1_error) /* "View.MemoryView":514 * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): * if flags & PyBUF_WRITABLE and self.view.readonly: # 
<<<<<<<<<<<<<< * raise ValueError("Cannot create writable memory view from read-only memoryview") * */ } /* "View.MemoryView":517 * raise ValueError("Cannot create writable memory view from read-only memoryview") * * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<< * info.shape = self.view.shape * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_STRIDES) != 0); if (__pyx_t_1) { /* "View.MemoryView":518 * * if flags & PyBUF_STRIDES: * info.shape = self.view.shape # <<<<<<<<<<<<<< * else: * info.shape = NULL */ __pyx_t_4 = __pyx_v_self->view.shape; __pyx_v_info->shape = __pyx_t_4; /* "View.MemoryView":517 * raise ValueError("Cannot create writable memory view from read-only memoryview") * * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<< * info.shape = self.view.shape * else: */ goto __pyx_L6; } /* "View.MemoryView":520 * info.shape = self.view.shape * else: * info.shape = NULL # <<<<<<<<<<<<<< * * if flags & PyBUF_STRIDES: */ /*else*/ { __pyx_v_info->shape = NULL; } __pyx_L6:; /* "View.MemoryView":522 * info.shape = NULL * * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<< * info.strides = self.view.strides * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_STRIDES) != 0); if (__pyx_t_1) { /* "View.MemoryView":523 * * if flags & PyBUF_STRIDES: * info.strides = self.view.strides # <<<<<<<<<<<<<< * else: * info.strides = NULL */ __pyx_t_4 = __pyx_v_self->view.strides; __pyx_v_info->strides = __pyx_t_4; /* "View.MemoryView":522 * info.shape = NULL * * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<< * info.strides = self.view.strides * else: */ goto __pyx_L7; } /* "View.MemoryView":525 * info.strides = self.view.strides * else: * info.strides = NULL # <<<<<<<<<<<<<< * * if flags & PyBUF_INDIRECT: */ /*else*/ { __pyx_v_info->strides = NULL; } __pyx_L7:; /* "View.MemoryView":527 * info.strides = NULL * * if flags & PyBUF_INDIRECT: # <<<<<<<<<<<<<< * info.suboffsets = self.view.suboffsets * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_INDIRECT) != 0); if (__pyx_t_1) { /* "View.MemoryView":528 * * if flags & PyBUF_INDIRECT: * info.suboffsets = self.view.suboffsets # <<<<<<<<<<<<<< * else: * info.suboffsets = NULL */ __pyx_t_4 = __pyx_v_self->view.suboffsets; __pyx_v_info->suboffsets = __pyx_t_4; /* "View.MemoryView":527 * info.strides = NULL * * if flags & PyBUF_INDIRECT: # <<<<<<<<<<<<<< * info.suboffsets = self.view.suboffsets * else: */ goto __pyx_L8; } /* "View.MemoryView":530 * info.suboffsets = self.view.suboffsets * else: * info.suboffsets = NULL # <<<<<<<<<<<<<< * * if flags & PyBUF_FORMAT: */ /*else*/ { __pyx_v_info->suboffsets = NULL; } __pyx_L8:; /* "View.MemoryView":532 * info.suboffsets = NULL * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * info.format = self.view.format * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); if (__pyx_t_1) { /* "View.MemoryView":533 * * if flags & PyBUF_FORMAT: * info.format = self.view.format # <<<<<<<<<<<<<< * else: * info.format = NULL */ __pyx_t_5 = __pyx_v_self->view.format; __pyx_v_info->format = __pyx_t_5; /* "View.MemoryView":532 * info.suboffsets = NULL * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * info.format = self.view.format * else: */ goto __pyx_L9; } /* "View.MemoryView":535 * info.format = self.view.format * else: * info.format = NULL # <<<<<<<<<<<<<< * * info.buf = self.view.buf */ /*else*/ { __pyx_v_info->format = NULL; } __pyx_L9:; /* "View.MemoryView":537 * info.format = NULL * * info.buf = self.view.buf # <<<<<<<<<<<<<< * info.ndim = self.view.ndim * info.itemsize = self.view.itemsize */ __pyx_t_6 = __pyx_v_self->view.buf; __pyx_v_info->buf = 
__pyx_t_6; /* "View.MemoryView":538 * * info.buf = self.view.buf * info.ndim = self.view.ndim # <<<<<<<<<<<<<< * info.itemsize = self.view.itemsize * info.len = self.view.len */ __pyx_t_7 = __pyx_v_self->view.ndim; __pyx_v_info->ndim = __pyx_t_7; /* "View.MemoryView":539 * info.buf = self.view.buf * info.ndim = self.view.ndim * info.itemsize = self.view.itemsize # <<<<<<<<<<<<<< * info.len = self.view.len * info.readonly = self.view.readonly */ __pyx_t_8 = __pyx_v_self->view.itemsize; __pyx_v_info->itemsize = __pyx_t_8; /* "View.MemoryView":540 * info.ndim = self.view.ndim * info.itemsize = self.view.itemsize * info.len = self.view.len # <<<<<<<<<<<<<< * info.readonly = self.view.readonly * info.obj = self */ __pyx_t_8 = __pyx_v_self->view.len; __pyx_v_info->len = __pyx_t_8; /* "View.MemoryView":541 * info.itemsize = self.view.itemsize * info.len = self.view.len * info.readonly = self.view.readonly # <<<<<<<<<<<<<< * info.obj = self * */ __pyx_t_1 = __pyx_v_self->view.readonly; __pyx_v_info->readonly = __pyx_t_1; /* "View.MemoryView":542 * info.len = self.view.len * info.readonly = self.view.readonly * info.obj = self # <<<<<<<<<<<<<< * * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") */ __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = ((PyObject *)__pyx_v_self); /* "View.MemoryView":513 * * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< * if flags & PyBUF_WRITABLE and self.view.readonly: * raise ValueError("Cannot create writable memory view from read-only memoryview") */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; if (__pyx_v_info->obj != NULL) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; } goto __pyx_L2; __pyx_L0:; if (__pyx_v_info->obj == Py_None) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; } __pyx_L2:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":548 * * @property * def T(self): # <<<<<<<<<<<<<< * cdef _memoryviewslice result = memoryview_copy(self) * transpose_memslice(&result.from_slice) */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self) { struct __pyx_memoryviewslice_obj *__pyx_v_result = 0; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":549 * @property * def T(self): * cdef _memoryviewslice result = memoryview_copy(self) # <<<<<<<<<<<<<< * transpose_memslice(&result.from_slice) * return result */ __pyx_t_1 = __pyx_memoryview_copy_object(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 549, 
__pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_memoryviewslice_type))))) __PYX_ERR(1, 549, __pyx_L1_error) __pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":550 * def T(self): * cdef _memoryviewslice result = memoryview_copy(self) * transpose_memslice(&result.from_slice) # <<<<<<<<<<<<<< * return result * */ __pyx_t_2 = __pyx_memslice_transpose((&__pyx_v_result->from_slice)); if (unlikely(__pyx_t_2 == ((int)0))) __PYX_ERR(1, 550, __pyx_L1_error) /* "View.MemoryView":551 * cdef _memoryviewslice result = memoryview_copy(self) * transpose_memslice(&result.from_slice) * return result # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = ((PyObject *)__pyx_v_result); goto __pyx_L0; /* "View.MemoryView":548 * * @property * def T(self): # <<<<<<<<<<<<<< * cdef _memoryviewslice result = memoryview_copy(self) * transpose_memslice(&result.from_slice) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.T.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":554 * * @property * def base(self): # <<<<<<<<<<<<<< * return self.obj * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":555 * @property * def base(self): * return self.obj # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_self->obj); __pyx_r = __pyx_v_self->obj; goto __pyx_L0; /* "View.MemoryView":554 * * @property * def base(self): # <<<<<<<<<<<<<< * return self.obj * */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":558 * * @property * def shape(self): # <<<<<<<<<<<<<< * return tuple([length for length in self.view.shape[:self.view.ndim]]) * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self) { Py_ssize_t __pyx_v_length; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; Py_ssize_t *__pyx_t_2; Py_ssize_t 
*__pyx_t_3; Py_ssize_t *__pyx_t_4; PyObject *__pyx_t_5 = NULL; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":559 * @property * def shape(self): * return tuple([length for length in self.view.shape[:self.view.ndim]]) # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 559, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim); for (__pyx_t_4 = __pyx_v_self->view.shape; __pyx_t_4 < __pyx_t_3; __pyx_t_4++) { __pyx_t_2 = __pyx_t_4; __pyx_v_length = (__pyx_t_2[0]); __pyx_t_5 = PyInt_FromSsize_t(__pyx_v_length); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 559, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); if (unlikely(__Pyx_ListComp_Append(__pyx_t_1, (PyObject*)__pyx_t_5))) __PYX_ERR(1, 559, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; } __pyx_t_5 = PyList_AsTuple(((PyObject*)__pyx_t_1)); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 559, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "View.MemoryView":558 * * @property * def shape(self): # <<<<<<<<<<<<<< * return tuple([length for length in self.view.shape[:self.view.ndim]]) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.memoryview.shape.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":562 * * @property * def strides(self): # <<<<<<<<<<<<<< * if self.view.strides == NULL: * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self) { Py_ssize_t __pyx_v_stride; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; Py_ssize_t *__pyx_t_3; Py_ssize_t *__pyx_t_4; Py_ssize_t *__pyx_t_5; PyObject *__pyx_t_6 = NULL; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":563 * @property * def strides(self): * if self.view.strides == NULL: # <<<<<<<<<<<<<< * * raise ValueError("Buffer view does not expose strides") */ __pyx_t_1 = ((__pyx_v_self->view.strides == NULL) != 0); if (unlikely(__pyx_t_1)) { /* "View.MemoryView":565 * if self.view.strides == NULL: * * raise ValueError("Buffer view does not expose strides") # <<<<<<<<<<<<<< * * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) */ __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__16, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 565, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __PYX_ERR(1, 565, __pyx_L1_error) /* "View.MemoryView":563 * @property * def strides(self): * if self.view.strides == NULL: # <<<<<<<<<<<<<< * * raise ValueError("Buffer view does not expose strides") */ } /* "View.MemoryView":567 * raise 
ValueError("Buffer view does not expose strides") * * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 567, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = (__pyx_v_self->view.strides + __pyx_v_self->view.ndim); for (__pyx_t_5 = __pyx_v_self->view.strides; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) { __pyx_t_3 = __pyx_t_5; __pyx_v_stride = (__pyx_t_3[0]); __pyx_t_6 = PyInt_FromSsize_t(__pyx_v_stride); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 567, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); if (unlikely(__Pyx_ListComp_Append(__pyx_t_2, (PyObject*)__pyx_t_6))) __PYX_ERR(1, 567, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } __pyx_t_6 = PyList_AsTuple(((PyObject*)__pyx_t_2)); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 567, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_6; __pyx_t_6 = 0; goto __pyx_L0; /* "View.MemoryView":562 * * @property * def strides(self): # <<<<<<<<<<<<<< * if self.view.strides == NULL: * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("View.MemoryView.memoryview.strides.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":570 * * @property * def suboffsets(self): # <<<<<<<<<<<<<< * if self.view.suboffsets == NULL: * return (-1,) * self.view.ndim */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self) { Py_ssize_t __pyx_v_suboffset; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; Py_ssize_t *__pyx_t_4; Py_ssize_t *__pyx_t_5; Py_ssize_t *__pyx_t_6; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":571 * @property * def suboffsets(self): * if self.view.suboffsets == NULL: # <<<<<<<<<<<<<< * return (-1,) * self.view.ndim * */ __pyx_t_1 = ((__pyx_v_self->view.suboffsets == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":572 * def suboffsets(self): * if self.view.suboffsets == NULL: * return (-1,) * self.view.ndim # <<<<<<<<<<<<<< * * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 572, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyNumber_Multiply(__pyx_tuple__17, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 572, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":571 * @property * def suboffsets(self): * if self.view.suboffsets == NULL: # <<<<<<<<<<<<<< * return (-1,) * self.view.ndim * */ } /* "View.MemoryView":574 * return 
(-1,) * self.view.ndim * * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 574, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = (__pyx_v_self->view.suboffsets + __pyx_v_self->view.ndim); for (__pyx_t_6 = __pyx_v_self->view.suboffsets; __pyx_t_6 < __pyx_t_5; __pyx_t_6++) { __pyx_t_4 = __pyx_t_6; __pyx_v_suboffset = (__pyx_t_4[0]); __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_suboffset); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 574, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (unlikely(__Pyx_ListComp_Append(__pyx_t_3, (PyObject*)__pyx_t_2))) __PYX_ERR(1, 574, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } __pyx_t_2 = PyList_AsTuple(((PyObject*)__pyx_t_3)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 574, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":570 * * @property * def suboffsets(self): # <<<<<<<<<<<<<< * if self.view.suboffsets == NULL: * return (-1,) * self.view.ndim */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.suboffsets.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":577 * * @property * def ndim(self): # <<<<<<<<<<<<<< * return self.view.ndim * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":578 * @property * def ndim(self): * return self.view.ndim # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 578, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":577 * * @property * def ndim(self): # <<<<<<<<<<<<<< * return self.view.ndim * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.ndim.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":581 * * @property * def itemsize(self): # <<<<<<<<<<<<<< * return self.view.itemsize * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = 
__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":582 * @property * def itemsize(self): * return self.view.itemsize # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 582, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":581 * * @property * def itemsize(self): # <<<<<<<<<<<<<< * return self.view.itemsize * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.itemsize.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":585 * * @property * def nbytes(self): # <<<<<<<<<<<<<< * return self.size * self.view.itemsize * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":586 * @property * def nbytes(self): * return self.size * self.view.itemsize # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 586, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 586, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyNumber_Multiply(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 586, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":585 * * @property * def nbytes(self): # <<<<<<<<<<<<<< * return self.size * self.view.itemsize * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.nbytes.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":589 * * @property * def size(self): # <<<<<<<<<<<<<< * if self._size is None: * result = 1 */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(PyObject *__pyx_v_self); /*proto*/ 
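/*
 * The `size` getter defined below is lazy: on first access it accumulates
 * the product of the entries in view.shape[:view.ndim] and caches the
 * resulting Python integer on the instance as `_size`; later accesses
 * return the cached object. The `nbytes` getter above then derives
 * `size * view.itemsize`. A minimal Python-level sketch of the invariants
 * these getters maintain (the name `mv` is illustrative, not a symbol
 * defined in this module):
 *
 *     mv = <some 2-D typed memoryview>
 *     assert mv.size == mv.shape[0] * mv.shape[1]
 *     assert mv.nbytes == mv.size * mv.itemsize
 */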
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_v_result = NULL; PyObject *__pyx_v_length = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; Py_ssize_t *__pyx_t_3; Py_ssize_t *__pyx_t_4; Py_ssize_t *__pyx_t_5; PyObject *__pyx_t_6 = NULL; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":590 * @property * def size(self): * if self._size is None: # <<<<<<<<<<<<<< * result = 1 * */ __pyx_t_1 = (__pyx_v_self->_size == Py_None); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":591 * def size(self): * if self._size is None: * result = 1 # <<<<<<<<<<<<<< * * for length in self.view.shape[:self.view.ndim]: */ __Pyx_INCREF(__pyx_int_1); __pyx_v_result = __pyx_int_1; /* "View.MemoryView":593 * result = 1 * * for length in self.view.shape[:self.view.ndim]: # <<<<<<<<<<<<<< * result *= length * */ __pyx_t_4 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim); for (__pyx_t_5 = __pyx_v_self->view.shape; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) { __pyx_t_3 = __pyx_t_5; __pyx_t_6 = PyInt_FromSsize_t((__pyx_t_3[0])); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 593, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_6); __pyx_t_6 = 0; /* "View.MemoryView":594 * * for length in self.view.shape[:self.view.ndim]: * result *= length # <<<<<<<<<<<<<< * * self._size = result */ __pyx_t_6 = PyNumber_InPlaceMultiply(__pyx_v_result, __pyx_v_length); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 594, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF_SET(__pyx_v_result, __pyx_t_6); __pyx_t_6 = 0; } /* "View.MemoryView":596 * result *= length * * self._size = result # <<<<<<<<<<<<<< * * return self._size */ __Pyx_INCREF(__pyx_v_result); __Pyx_GIVEREF(__pyx_v_result); __Pyx_GOTREF(__pyx_v_self->_size); __Pyx_DECREF(__pyx_v_self->_size); __pyx_v_self->_size = __pyx_v_result; /* "View.MemoryView":590 * @property * def size(self): * if self._size is None: # <<<<<<<<<<<<<< * result = 1 * */ } /* "View.MemoryView":598 * self._size = result * * return self._size # <<<<<<<<<<<<<< * * def __len__(self): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_self->_size); __pyx_r = __pyx_v_self->_size; goto __pyx_L0; /* "View.MemoryView":589 * * @property * def size(self): # <<<<<<<<<<<<<< * if self._size is None: * result = 1 */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("View.MemoryView.memoryview.size.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_result); __Pyx_XDECREF(__pyx_v_length); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":600 * return self._size * * def __len__(self): # <<<<<<<<<<<<<< * if self.view.ndim >= 1: * return self.view.shape[0] */ /* Python wrapper */ static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self); /*proto*/ static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self) { Py_ssize_t __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__len__ (wrapper)", 0); __pyx_r 
= __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self) { Py_ssize_t __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("__len__", 0); /* "View.MemoryView":601 * * def __len__(self): * if self.view.ndim >= 1: # <<<<<<<<<<<<<< * return self.view.shape[0] * */ __pyx_t_1 = ((__pyx_v_self->view.ndim >= 1) != 0); if (__pyx_t_1) { /* "View.MemoryView":602 * def __len__(self): * if self.view.ndim >= 1: * return self.view.shape[0] # <<<<<<<<<<<<<< * * return 0 */ __pyx_r = (__pyx_v_self->view.shape[0]); goto __pyx_L0; /* "View.MemoryView":601 * * def __len__(self): * if self.view.ndim >= 1: # <<<<<<<<<<<<<< * return self.view.shape[0] * */ } /* "View.MemoryView":604 * return self.view.shape[0] * * return 0 # <<<<<<<<<<<<<< * * def __repr__(self): */ __pyx_r = 0; goto __pyx_L0; /* "View.MemoryView":600 * return self._size * * def __len__(self): # <<<<<<<<<<<<<< * if self.view.ndim >= 1: * return self.view.shape[0] */ /* function exit code */ __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":606 * return 0 * * def __repr__(self): # <<<<<<<<<<<<<< * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, * id(self)) */ /* Python wrapper */ static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; __Pyx_RefNannySetupContext("__repr__", 0); /* "View.MemoryView":607 * * def __repr__(self): * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, # <<<<<<<<<<<<<< * id(self)) * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 607, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 607, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 607, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":608 * def __repr__(self): * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, * id(self)) # <<<<<<<<<<<<<< * * def __str__(self): */ __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 608, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); /* "View.MemoryView":607 * * def __repr__(self): * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, # <<<<<<<<<<<<<< * id(self)) * */ __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 
607, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_2); __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 607, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":606 * return 0 * * def __repr__(self): # <<<<<<<<<<<<<< * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, * id(self)) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":610 * id(self)) * * def __str__(self): # <<<<<<<<<<<<<< * return "<MemoryView of %r object>" % (self.base.__class__.__name__,) * */ /* Python wrapper */ static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__str__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("__str__", 0); /* "View.MemoryView":611 * * def __str__(self): * return "<MemoryView of %r object>" % (self.base.__class__.__name__,) # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 611, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 611, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 611, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 611, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_object, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 611, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":610 * id(self)) * * def __str__(self): # <<<<<<<<<<<<<< * return "<MemoryView of %r object>" % (self.base.__class__.__name__,) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.__str__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":614 * * * def is_c_contig(self): # 
<<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* Python wrapper */ static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("is_c_contig (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice *__pyx_v_mslice; __Pyx_memviewslice __pyx_v_tmp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("is_c_contig", 0); /* "View.MemoryView":617 * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<< * return slice_is_contig(mslice[0], 'C', self.view.ndim) * */ __pyx_v_mslice = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp)); /* "View.MemoryView":618 * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) * return slice_is_contig(mslice[0], 'C', self.view.ndim) # <<<<<<<<<<<<<< * * def is_f_contig(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig((__pyx_v_mslice[0]), 'C', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 618, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":614 * * * def is_c_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.is_c_contig", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":620 * return slice_is_contig(mslice[0], 'C', self.view.ndim) * * def is_f_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* Python wrapper */ static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("is_f_contig (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice *__pyx_v_mslice; __Pyx_memviewslice __pyx_v_tmp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("is_f_contig", 0); /* "View.MemoryView":623 * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<< * return slice_is_contig(mslice[0], 'F', self.view.ndim) * */ __pyx_v_mslice = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp)); /* 
"View.MemoryView":624 * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) * return slice_is_contig(mslice[0], 'F', self.view.ndim) # <<<<<<<<<<<<<< * * def copy(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig((__pyx_v_mslice[0]), 'F', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 624, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":620 * return slice_is_contig(mslice[0], 'C', self.view.ndim) * * def is_f_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.is_f_contig", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":626 * return slice_is_contig(mslice[0], 'F', self.view.ndim) * * def copy(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice mslice * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS */ /* Python wrapper */ static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("copy (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice __pyx_v_mslice; int __pyx_v_flags; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_memviewslice __pyx_t_1; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("copy", 0); /* "View.MemoryView":628 * def copy(self): * cdef __Pyx_memviewslice mslice * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS # <<<<<<<<<<<<<< * * slice_copy(self, &mslice) */ __pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_F_CONTIGUOUS)); /* "View.MemoryView":630 * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS * * slice_copy(self, &mslice) # <<<<<<<<<<<<<< * mslice = slice_copy_contig(&mslice, "c", self.view.ndim, * self.view.itemsize, */ __pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_mslice)); /* "View.MemoryView":631 * * slice_copy(self, &mslice) * mslice = slice_copy_contig(&mslice, "c", self.view.ndim, # <<<<<<<<<<<<<< * self.view.itemsize, * flags|PyBUF_C_CONTIGUOUS, */ __pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_mslice), ((char *)"c"), __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_C_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 631, __pyx_L1_error) __pyx_v_mslice = __pyx_t_1; /* "View.MemoryView":636 * self.dtype_is_object) * * return memoryview_copy_from_slice(self, &mslice) # <<<<<<<<<<<<<< * * def copy_fortran(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_mslice)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 636, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":626 * return slice_is_contig(mslice[0], 'F', self.view.ndim) * * def copy(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice mslice * cdef int 
flags = self.flags & ~PyBUF_F_CONTIGUOUS */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.copy", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":638 * return memoryview_copy_from_slice(self, &mslice) * * def copy_fortran(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice src, dst * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS */ /* Python wrapper */ static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("copy_fortran (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice __pyx_v_src; __Pyx_memviewslice __pyx_v_dst; int __pyx_v_flags; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_memviewslice __pyx_t_1; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("copy_fortran", 0); /* "View.MemoryView":640 * def copy_fortran(self): * cdef __Pyx_memviewslice src, dst * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS # <<<<<<<<<<<<<< * * slice_copy(self, &src) */ __pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_C_CONTIGUOUS)); /* "View.MemoryView":642 * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS * * slice_copy(self, &src) # <<<<<<<<<<<<<< * dst = slice_copy_contig(&src, "fortran", self.view.ndim, * self.view.itemsize, */ __pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_src)); /* "View.MemoryView":643 * * slice_copy(self, &src) * dst = slice_copy_contig(&src, "fortran", self.view.ndim, # <<<<<<<<<<<<<< * self.view.itemsize, * flags|PyBUF_F_CONTIGUOUS, */ __pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_src), ((char *)"fortran"), __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_F_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 643, __pyx_L1_error) __pyx_v_dst = __pyx_t_1; /* "View.MemoryView":648 * self.dtype_is_object) * * return memoryview_copy_from_slice(self, &dst) # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_dst)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 648, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":638 * return memoryview_copy_from_slice(self, &mslice) * * def copy_fortran(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice src, dst * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.copy_fortran", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* Python wrapper */ static PyObject 
*__pyx_pw___pyx_memoryview_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_pw___pyx_memoryview_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_memoryview___reduce_cython__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_memoryview___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__18, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 2, __pyx_L1_error) /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_memoryview_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ static PyObject *__pyx_pw___pyx_memoryview_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_memoryview_2__setstate_cython__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_memoryview_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__19, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 4, __pyx_L1_error) /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial 
__cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":652 * * @cname('__pyx_memoryview_new') * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<< * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo */ static PyObject *__pyx_memoryview_new(PyObject *__pyx_v_o, int __pyx_v_flags, int __pyx_v_dtype_is_object, __Pyx_TypeInfo *__pyx_v_typeinfo) { struct __pyx_memoryview_obj *__pyx_v_result = 0; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; __Pyx_RefNannySetupContext("memoryview_cwrapper", 0); /* "View.MemoryView":653 * @cname('__pyx_memoryview_new') * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): * cdef memoryview result = memoryview(o, flags, dtype_is_object) # <<<<<<<<<<<<<< * result.typeinfo = typeinfo * return result */ __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 653, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 653, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 653, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(__pyx_v_o); __Pyx_GIVEREF(__pyx_v_o); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_o); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 653, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result = ((struct __pyx_memoryview_obj *)__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":654 * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo # <<<<<<<<<<<<<< * return result * */ __pyx_v_result->typeinfo = __pyx_v_typeinfo; /* "View.MemoryView":655 * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo * return result # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_check') */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = ((PyObject *)__pyx_v_result); goto __pyx_L0; /* "View.MemoryView":652 * * @cname('__pyx_memoryview_new') * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<< * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } 
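/*
 * memoryview_cwrapper (emitted above as __pyx_memoryview_new) is the
 * C-level constructor used when coercing a buffer-exporting object into a
 * typed memoryview: it instantiates the memoryview type with the requested
 * buffer flags and then attaches the element-type descriptor. A hedged
 * usage sketch; `buf_obj` is any buffer exporter, and `__Pyx_TypeInfo_double`
 * stands in for whichever __Pyx_TypeInfo record the enclosing module
 * actually defines for its element type:
 *
 *     PyObject *mv = __pyx_memoryview_new(
 *         buf_obj,
 *         PyBUF_RECORDS,            (strided + writable + format information)
 *         0,                        (dtype_is_object: elements are not PyObject*)
 *         &__Pyx_TypeInfo_double);
 *     if (unlikely(!mv))
 *         return NULL;
 */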
/* "View.MemoryView":658 * * @cname('__pyx_memoryview_check') * cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<< * return isinstance(o, memoryview) * */ static CYTHON_INLINE int __pyx_memoryview_check(PyObject *__pyx_v_o) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("memoryview_check", 0); /* "View.MemoryView":659 * @cname('__pyx_memoryview_check') * cdef inline bint memoryview_check(object o): * return isinstance(o, memoryview) # <<<<<<<<<<<<<< * * cdef tuple _unellipsify(object index, int ndim): */ __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_o, __pyx_memoryview_type); __pyx_r = __pyx_t_1; goto __pyx_L0; /* "View.MemoryView":658 * * @cname('__pyx_memoryview_check') * cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<< * return isinstance(o, memoryview) * */ /* function exit code */ __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":661 * return isinstance(o, memoryview) * * cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<< * """ * Replace all ellipses with full slices and fill incomplete indices with */ static PyObject *_unellipsify(PyObject *__pyx_v_index, int __pyx_v_ndim) { PyObject *__pyx_v_tup = NULL; PyObject *__pyx_v_result = NULL; int __pyx_v_have_slices; int __pyx_v_seen_ellipsis; CYTHON_UNUSED PyObject *__pyx_v_idx = NULL; PyObject *__pyx_v_item = NULL; Py_ssize_t __pyx_v_nslices; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; Py_ssize_t __pyx_t_5; PyObject *(*__pyx_t_6)(PyObject *); PyObject *__pyx_t_7 = NULL; Py_ssize_t __pyx_t_8; int __pyx_t_9; int __pyx_t_10; PyObject *__pyx_t_11 = NULL; __Pyx_RefNannySetupContext("_unellipsify", 0); /* "View.MemoryView":666 * full slices. * """ * if not isinstance(index, tuple): # <<<<<<<<<<<<<< * tup = (index,) * else: */ __pyx_t_1 = PyTuple_Check(__pyx_v_index); __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":667 * """ * if not isinstance(index, tuple): * tup = (index,) # <<<<<<<<<<<<<< * else: * tup = index */ __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 667, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(__pyx_v_index); __Pyx_GIVEREF(__pyx_v_index); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_index); __pyx_v_tup = __pyx_t_3; __pyx_t_3 = 0; /* "View.MemoryView":666 * full slices. 
* """ * if not isinstance(index, tuple): # <<<<<<<<<<<<<< * tup = (index,) * else: */ goto __pyx_L3; } /* "View.MemoryView":669 * tup = (index,) * else: * tup = index # <<<<<<<<<<<<<< * * result = [] */ /*else*/ { __Pyx_INCREF(__pyx_v_index); __pyx_v_tup = __pyx_v_index; } __pyx_L3:; /* "View.MemoryView":671 * tup = index * * result = [] # <<<<<<<<<<<<<< * have_slices = False * seen_ellipsis = False */ __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 671, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_result = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":672 * * result = [] * have_slices = False # <<<<<<<<<<<<<< * seen_ellipsis = False * for idx, item in enumerate(tup): */ __pyx_v_have_slices = 0; /* "View.MemoryView":673 * result = [] * have_slices = False * seen_ellipsis = False # <<<<<<<<<<<<<< * for idx, item in enumerate(tup): * if item is Ellipsis: */ __pyx_v_seen_ellipsis = 0; /* "View.MemoryView":674 * have_slices = False * seen_ellipsis = False * for idx, item in enumerate(tup): # <<<<<<<<<<<<<< * if item is Ellipsis: * if not seen_ellipsis: */ __Pyx_INCREF(__pyx_int_0); __pyx_t_3 = __pyx_int_0; if (likely(PyList_CheckExact(__pyx_v_tup)) || PyTuple_CheckExact(__pyx_v_tup)) { __pyx_t_4 = __pyx_v_tup; __Pyx_INCREF(__pyx_t_4); __pyx_t_5 = 0; __pyx_t_6 = NULL; } else { __pyx_t_5 = -1; __pyx_t_4 = PyObject_GetIter(__pyx_v_tup); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 674, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_6 = Py_TYPE(__pyx_t_4)->tp_iternext; if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 674, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_6)) { if (likely(PyList_CheckExact(__pyx_t_4))) { if (__pyx_t_5 >= PyList_GET_SIZE(__pyx_t_4)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_7 = PyList_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(1, 674, __pyx_L1_error) #else __pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 674, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); #endif } else { if (__pyx_t_5 >= PyTuple_GET_SIZE(__pyx_t_4)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_7 = PyTuple_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(1, 674, __pyx_L1_error) #else __pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 674, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); #endif } } else { __pyx_t_7 = __pyx_t_6(__pyx_t_4); if (unlikely(!__pyx_t_7)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(1, 674, __pyx_L1_error) } break; } __Pyx_GOTREF(__pyx_t_7); } __Pyx_XDECREF_SET(__pyx_v_item, __pyx_t_7); __pyx_t_7 = 0; __Pyx_INCREF(__pyx_t_3); __Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_3); __pyx_t_7 = __Pyx_PyInt_AddObjC(__pyx_t_3, __pyx_int_1, 1, 0); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 674, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = __pyx_t_7; __pyx_t_7 = 0; /* "View.MemoryView":675 * seen_ellipsis = False * for idx, item in enumerate(tup): * if item is Ellipsis: # <<<<<<<<<<<<<< * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) */ __pyx_t_2 = (__pyx_v_item == __pyx_builtin_Ellipsis); __pyx_t_1 = (__pyx_t_2 != 0); if (__pyx_t_1) { /* "View.MemoryView":676 * for idx, item in enumerate(tup): * if item is Ellipsis: * if not 
seen_ellipsis: # <<<<<<<<<<<<<< * result.extend([slice(None)] * (ndim - len(tup) + 1)) * seen_ellipsis = True */ __pyx_t_1 = ((!(__pyx_v_seen_ellipsis != 0)) != 0); if (__pyx_t_1) { /* "View.MemoryView":677 * if item is Ellipsis: * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) # <<<<<<<<<<<<<< * seen_ellipsis = True * else: */ __pyx_t_8 = PyObject_Length(__pyx_v_tup); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(1, 677, __pyx_L1_error) __pyx_t_7 = PyList_New(1 * ((((__pyx_v_ndim - __pyx_t_8) + 1)<0) ? 0:((__pyx_v_ndim - __pyx_t_8) + 1))); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 677, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); { Py_ssize_t __pyx_temp; for (__pyx_temp=0; __pyx_temp < ((__pyx_v_ndim - __pyx_t_8) + 1); __pyx_temp++) { __Pyx_INCREF(__pyx_slice__20); __Pyx_GIVEREF(__pyx_slice__20); PyList_SET_ITEM(__pyx_t_7, __pyx_temp, __pyx_slice__20); } } __pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_7); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 677, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "View.MemoryView":678 * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) * seen_ellipsis = True # <<<<<<<<<<<<<< * else: * result.append(slice(None)) */ __pyx_v_seen_ellipsis = 1; /* "View.MemoryView":676 * for idx, item in enumerate(tup): * if item is Ellipsis: * if not seen_ellipsis: # <<<<<<<<<<<<<< * result.extend([slice(None)] * (ndim - len(tup) + 1)) * seen_ellipsis = True */ goto __pyx_L7; } /* "View.MemoryView":680 * seen_ellipsis = True * else: * result.append(slice(None)) # <<<<<<<<<<<<<< * have_slices = True * else: */ /*else*/ { __pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_slice__21); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 680, __pyx_L1_error) } __pyx_L7:; /* "View.MemoryView":681 * else: * result.append(slice(None)) * have_slices = True # <<<<<<<<<<<<<< * else: * if not isinstance(item, slice) and not PyIndex_Check(item): */ __pyx_v_have_slices = 1; /* "View.MemoryView":675 * seen_ellipsis = False * for idx, item in enumerate(tup): * if item is Ellipsis: # <<<<<<<<<<<<<< * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) */ goto __pyx_L6; } /* "View.MemoryView":683 * have_slices = True * else: * if not isinstance(item, slice) and not PyIndex_Check(item): # <<<<<<<<<<<<<< * raise TypeError("Cannot index with type '%s'" % type(item)) * */ /*else*/ { __pyx_t_2 = PySlice_Check(__pyx_v_item); __pyx_t_10 = ((!(__pyx_t_2 != 0)) != 0); if (__pyx_t_10) { } else { __pyx_t_1 = __pyx_t_10; goto __pyx_L9_bool_binop_done; } __pyx_t_10 = ((!(PyIndex_Check(__pyx_v_item) != 0)) != 0); __pyx_t_1 = __pyx_t_10; __pyx_L9_bool_binop_done:; if (unlikely(__pyx_t_1)) { /* "View.MemoryView":684 * else: * if not isinstance(item, slice) and not PyIndex_Check(item): * raise TypeError("Cannot index with type '%s'" % type(item)) # <<<<<<<<<<<<<< * * have_slices = have_slices or isinstance(item, slice) */ __pyx_t_7 = __Pyx_PyString_Format(__pyx_kp_s_Cannot_index_with_type_s, ((PyObject *)Py_TYPE(__pyx_v_item))); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 684, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_11 = __Pyx_PyObject_CallOneArg(__pyx_builtin_TypeError, __pyx_t_7); if (unlikely(!__pyx_t_11)) __PYX_ERR(1, 684, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_Raise(__pyx_t_11, 0, 0, 0); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __PYX_ERR(1, 684, __pyx_L1_error) /* "View.MemoryView":683 * have_slices = True * else: * if not 
isinstance(item, slice) and not PyIndex_Check(item): # <<<<<<<<<<<<<< * raise TypeError("Cannot index with type '%s'" % type(item)) * */ } /* "View.MemoryView":686 * raise TypeError("Cannot index with type '%s'" % type(item)) * * have_slices = have_slices or isinstance(item, slice) # <<<<<<<<<<<<<< * result.append(item) * */ __pyx_t_10 = (__pyx_v_have_slices != 0); if (!__pyx_t_10) { } else { __pyx_t_1 = __pyx_t_10; goto __pyx_L11_bool_binop_done; } __pyx_t_10 = PySlice_Check(__pyx_v_item); __pyx_t_2 = (__pyx_t_10 != 0); __pyx_t_1 = __pyx_t_2; __pyx_L11_bool_binop_done:; __pyx_v_have_slices = __pyx_t_1; /* "View.MemoryView":687 * * have_slices = have_slices or isinstance(item, slice) * result.append(item) # <<<<<<<<<<<<<< * * nslices = ndim - len(result) */ __pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_v_item); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 687, __pyx_L1_error) } __pyx_L6:; /* "View.MemoryView":674 * have_slices = False * seen_ellipsis = False * for idx, item in enumerate(tup): # <<<<<<<<<<<<<< * if item is Ellipsis: * if not seen_ellipsis: */ } __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":689 * result.append(item) * * nslices = ndim - len(result) # <<<<<<<<<<<<<< * if nslices: * result.extend([slice(None)] * nslices) */ __pyx_t_5 = PyList_GET_SIZE(__pyx_v_result); if (unlikely(__pyx_t_5 == ((Py_ssize_t)-1))) __PYX_ERR(1, 689, __pyx_L1_error) __pyx_v_nslices = (__pyx_v_ndim - __pyx_t_5); /* "View.MemoryView":690 * * nslices = ndim - len(result) * if nslices: # <<<<<<<<<<<<<< * result.extend([slice(None)] * nslices) * */ __pyx_t_1 = (__pyx_v_nslices != 0); if (__pyx_t_1) { /* "View.MemoryView":691 * nslices = ndim - len(result) * if nslices: * result.extend([slice(None)] * nslices) # <<<<<<<<<<<<<< * * return have_slices or nslices, tuple(result) */ __pyx_t_3 = PyList_New(1 * ((__pyx_v_nslices<0) ? 
0:__pyx_v_nslices)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 691, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); { Py_ssize_t __pyx_temp; for (__pyx_temp=0; __pyx_temp < __pyx_v_nslices; __pyx_temp++) { __Pyx_INCREF(__pyx_slice__22); __Pyx_GIVEREF(__pyx_slice__22); PyList_SET_ITEM(__pyx_t_3, __pyx_temp, __pyx_slice__22); } } __pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_3); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 691, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":690 * * nslices = ndim - len(result) * if nslices: # <<<<<<<<<<<<<< * result.extend([slice(None)] * nslices) * */ } /* "View.MemoryView":693 * result.extend([slice(None)] * nslices) * * return have_slices or nslices, tuple(result) # <<<<<<<<<<<<<< * * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): */ __Pyx_XDECREF(__pyx_r); if (!__pyx_v_have_slices) { } else { __pyx_t_4 = __Pyx_PyBool_FromLong(__pyx_v_have_slices); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 693, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L14_bool_binop_done; } __pyx_t_4 = PyInt_FromSsize_t(__pyx_v_nslices); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 693, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __pyx_t_4; __pyx_t_4 = 0; __pyx_L14_bool_binop_done:; __pyx_t_4 = PyList_AsTuple(__pyx_v_result); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 693, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_11 = PyTuple_New(2); if (unlikely(!__pyx_t_11)) __PYX_ERR(1, 693, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_11, 0, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_11, 1, __pyx_t_4); __pyx_t_3 = 0; __pyx_t_4 = 0; __pyx_r = ((PyObject*)__pyx_t_11); __pyx_t_11 = 0; goto __pyx_L0; /* "View.MemoryView":661 * return isinstance(o, memoryview) * * cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<< * """ * Replace all ellipses with full slices and fill incomplete indices with */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_11); __Pyx_AddTraceback("View.MemoryView._unellipsify", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_tup); __Pyx_XDECREF(__pyx_v_result); __Pyx_XDECREF(__pyx_v_idx); __Pyx_XDECREF(__pyx_v_item); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":695 * return have_slices or nslices, tuple(result) * * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): # <<<<<<<<<<<<<< * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: */ static PyObject *assert_direct_dimensions(Py_ssize_t *__pyx_v_suboffsets, int __pyx_v_ndim) { Py_ssize_t __pyx_v_suboffset; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t *__pyx_t_1; Py_ssize_t *__pyx_t_2; Py_ssize_t *__pyx_t_3; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; __Pyx_RefNannySetupContext("assert_direct_dimensions", 0); /* "View.MemoryView":696 * * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): * for suboffset in suboffsets[:ndim]: # <<<<<<<<<<<<<< * if suboffset >= 0: * raise ValueError("Indirect dimensions not supported") */ __pyx_t_2 = (__pyx_v_suboffsets + __pyx_v_ndim); for (__pyx_t_3 = __pyx_v_suboffsets; __pyx_t_3 < __pyx_t_2; __pyx_t_3++) { __pyx_t_1 = __pyx_t_3; __pyx_v_suboffset = (__pyx_t_1[0]); /* "View.MemoryView":697 * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): * for suboffset in 
suboffsets[:ndim]: * if suboffset >= 0: # <<<<<<<<<<<<<< * raise ValueError("Indirect dimensions not supported") * */ __pyx_t_4 = ((__pyx_v_suboffset >= 0) != 0); if (unlikely(__pyx_t_4)) { /* "View.MemoryView":698 * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: * raise ValueError("Indirect dimensions not supported") # <<<<<<<<<<<<<< * * */ __pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__23, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 698, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __PYX_ERR(1, 698, __pyx_L1_error) /* "View.MemoryView":697 * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: # <<<<<<<<<<<<<< * raise ValueError("Indirect dimensions not supported") * */ } } /* "View.MemoryView":695 * return have_slices or nslices, tuple(result) * * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): # <<<<<<<<<<<<<< * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.assert_direct_dimensions", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":705 * * @cname('__pyx_memview_slice') * cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<< * cdef int new_ndim = 0, suboffset_dim = -1, dim * cdef bint negative_step */ static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *__pyx_v_memview, PyObject *__pyx_v_indices) { int __pyx_v_new_ndim; int __pyx_v_suboffset_dim; int __pyx_v_dim; __Pyx_memviewslice __pyx_v_src; __Pyx_memviewslice __pyx_v_dst; __Pyx_memviewslice *__pyx_v_p_src; struct __pyx_memoryviewslice_obj *__pyx_v_memviewsliceobj = 0; __Pyx_memviewslice *__pyx_v_p_dst; int *__pyx_v_p_suboffset_dim; Py_ssize_t __pyx_v_start; Py_ssize_t __pyx_v_stop; Py_ssize_t __pyx_v_step; int __pyx_v_have_start; int __pyx_v_have_stop; int __pyx_v_have_step; PyObject *__pyx_v_index = NULL; struct __pyx_memoryview_obj *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; struct __pyx_memoryview_obj *__pyx_t_4; char *__pyx_t_5; int __pyx_t_6; Py_ssize_t __pyx_t_7; PyObject *(*__pyx_t_8)(PyObject *); PyObject *__pyx_t_9 = NULL; Py_ssize_t __pyx_t_10; int __pyx_t_11; Py_ssize_t __pyx_t_12; __Pyx_RefNannySetupContext("memview_slice", 0); /* "View.MemoryView":706 * @cname('__pyx_memview_slice') * cdef memoryview memview_slice(memoryview memview, object indices): * cdef int new_ndim = 0, suboffset_dim = -1, dim # <<<<<<<<<<<<<< * cdef bint negative_step * cdef __Pyx_memviewslice src, dst */ __pyx_v_new_ndim = 0; __pyx_v_suboffset_dim = -1; /* "View.MemoryView":713 * * * memset(&dst, 0, sizeof(dst)) # <<<<<<<<<<<<<< * * cdef _memoryviewslice memviewsliceobj */ (void)(memset((&__pyx_v_dst), 0, (sizeof(__pyx_v_dst)))); /* "View.MemoryView":717 * cdef _memoryviewslice memviewsliceobj * * assert memview.view.ndim > 0 # <<<<<<<<<<<<<< * * if isinstance(memview, _memoryviewslice): */ #ifndef CYTHON_WITHOUT_ASSERTIONS if (unlikely(!Py_OptimizeFlag)) { if (unlikely(!((__pyx_v_memview->view.ndim > 0) != 0))) { PyErr_SetNone(PyExc_AssertionError); __PYX_ERR(1, 717, __pyx_L1_error) } } #endif /* "View.MemoryView":719 * assert memview.view.ndim > 0 * * 
if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * memviewsliceobj = memview * p_src = &memviewsliceobj.from_slice */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":720 * * if isinstance(memview, _memoryviewslice): * memviewsliceobj = memview # <<<<<<<<<<<<<< * p_src = &memviewsliceobj.from_slice * else: */ if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) __PYX_ERR(1, 720, __pyx_L1_error) __pyx_t_3 = ((PyObject *)__pyx_v_memview); __Pyx_INCREF(__pyx_t_3); __pyx_v_memviewsliceobj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":721 * if isinstance(memview, _memoryviewslice): * memviewsliceobj = memview * p_src = &memviewsliceobj.from_slice # <<<<<<<<<<<<<< * else: * slice_copy(memview, &src) */ __pyx_v_p_src = (&__pyx_v_memviewsliceobj->from_slice); /* "View.MemoryView":719 * assert memview.view.ndim > 0 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * memviewsliceobj = memview * p_src = &memviewsliceobj.from_slice */ goto __pyx_L3; } /* "View.MemoryView":723 * p_src = &memviewsliceobj.from_slice * else: * slice_copy(memview, &src) # <<<<<<<<<<<<<< * p_src = &src * */ /*else*/ { __pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_src)); /* "View.MemoryView":724 * else: * slice_copy(memview, &src) * p_src = &src # <<<<<<<<<<<<<< * * */ __pyx_v_p_src = (&__pyx_v_src); } __pyx_L3:; /* "View.MemoryView":730 * * * dst.memview = p_src.memview # <<<<<<<<<<<<<< * dst.data = p_src.data * */ __pyx_t_4 = __pyx_v_p_src->memview; __pyx_v_dst.memview = __pyx_t_4; /* "View.MemoryView":731 * * dst.memview = p_src.memview * dst.data = p_src.data # <<<<<<<<<<<<<< * * */ __pyx_t_5 = __pyx_v_p_src->data; __pyx_v_dst.data = __pyx_t_5; /* "View.MemoryView":736 * * * cdef __Pyx_memviewslice *p_dst = &dst # <<<<<<<<<<<<<< * cdef int *p_suboffset_dim = &suboffset_dim * cdef Py_ssize_t start, stop, step */ __pyx_v_p_dst = (&__pyx_v_dst); /* "View.MemoryView":737 * * cdef __Pyx_memviewslice *p_dst = &dst * cdef int *p_suboffset_dim = &suboffset_dim # <<<<<<<<<<<<<< * cdef Py_ssize_t start, stop, step * cdef bint have_start, have_stop, have_step */ __pyx_v_p_suboffset_dim = (&__pyx_v_suboffset_dim); /* "View.MemoryView":741 * cdef bint have_start, have_stop, have_step * * for dim, index in enumerate(indices): # <<<<<<<<<<<<<< * if PyIndex_Check(index): * slice_memviewslice( */ __pyx_t_6 = 0; if (likely(PyList_CheckExact(__pyx_v_indices)) || PyTuple_CheckExact(__pyx_v_indices)) { __pyx_t_3 = __pyx_v_indices; __Pyx_INCREF(__pyx_t_3); __pyx_t_7 = 0; __pyx_t_8 = NULL; } else { __pyx_t_7 = -1; __pyx_t_3 = PyObject_GetIter(__pyx_v_indices); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 741, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_8 = Py_TYPE(__pyx_t_3)->tp_iternext; if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 741, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_8)) { if (likely(PyList_CheckExact(__pyx_t_3))) { if (__pyx_t_7 >= PyList_GET_SIZE(__pyx_t_3)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_9 = PyList_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(1, 741, __pyx_L1_error) #else __pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 741, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); #endif } else { if (__pyx_t_7 >= 
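/* Note: this block is Cython's standard expansion of
   `for dim, index in enumerate(indices)`: exact lists and tuples take a
   fast path (PyList_GET_ITEM and PyTuple_GET_ITEM return borrowed
   references, hence the explicit __Pyx_INCREF), while any other iterable
   goes through tp_iternext with StopIteration swallowed to end the loop.
   Each `index` is then dispatched as an integer, None, or a slice object. */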
PyTuple_GET_SIZE(__pyx_t_3)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_9 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(1, 741, __pyx_L1_error) #else __pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 741, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); #endif } } else { __pyx_t_9 = __pyx_t_8(__pyx_t_3); if (unlikely(!__pyx_t_9)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(1, 741, __pyx_L1_error) } break; } __Pyx_GOTREF(__pyx_t_9); } __Pyx_XDECREF_SET(__pyx_v_index, __pyx_t_9); __pyx_t_9 = 0; __pyx_v_dim = __pyx_t_6; __pyx_t_6 = (__pyx_t_6 + 1); /* "View.MemoryView":742 * * for dim, index in enumerate(indices): * if PyIndex_Check(index): # <<<<<<<<<<<<<< * slice_memviewslice( * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], */ __pyx_t_2 = (PyIndex_Check(__pyx_v_index) != 0); if (__pyx_t_2) { /* "View.MemoryView":746 * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], * dim, new_ndim, p_suboffset_dim, * index, 0, 0, # start, stop, step # <<<<<<<<<<<<<< * 0, 0, 0, # have_{start,stop,step} * False) */ __pyx_t_10 = __Pyx_PyIndex_AsSsize_t(__pyx_v_index); if (unlikely((__pyx_t_10 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 746, __pyx_L1_error) /* "View.MemoryView":743 * for dim, index in enumerate(indices): * if PyIndex_Check(index): * slice_memviewslice( # <<<<<<<<<<<<<< * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], * dim, new_ndim, p_suboffset_dim, */ __pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_t_10, 0, 0, 0, 0, 0, 0); if (unlikely(__pyx_t_11 == ((int)-1))) __PYX_ERR(1, 743, __pyx_L1_error) /* "View.MemoryView":742 * * for dim, index in enumerate(indices): * if PyIndex_Check(index): # <<<<<<<<<<<<<< * slice_memviewslice( * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], */ goto __pyx_L6; } /* "View.MemoryView":749 * 0, 0, 0, # have_{start,stop,step} * False) * elif index is None: # <<<<<<<<<<<<<< * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 */ __pyx_t_2 = (__pyx_v_index == Py_None); __pyx_t_1 = (__pyx_t_2 != 0); if (__pyx_t_1) { /* "View.MemoryView":750 * False) * elif index is None: * p_dst.shape[new_ndim] = 1 # <<<<<<<<<<<<<< * p_dst.strides[new_ndim] = 0 * p_dst.suboffsets[new_ndim] = -1 */ (__pyx_v_p_dst->shape[__pyx_v_new_ndim]) = 1; /* "View.MemoryView":751 * elif index is None: * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 # <<<<<<<<<<<<<< * p_dst.suboffsets[new_ndim] = -1 * new_ndim += 1 */ (__pyx_v_p_dst->strides[__pyx_v_new_ndim]) = 0; /* "View.MemoryView":752 * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 * p_dst.suboffsets[new_ndim] = -1 # <<<<<<<<<<<<<< * new_ndim += 1 * else: */ (__pyx_v_p_dst->suboffsets[__pyx_v_new_ndim]) = -1L; /* "View.MemoryView":753 * p_dst.strides[new_ndim] = 0 * p_dst.suboffsets[new_ndim] = -1 * new_ndim += 1 # <<<<<<<<<<<<<< * else: * start = index.start or 0 */ __pyx_v_new_ndim = (__pyx_v_new_ndim + 1); /* "View.MemoryView":749 * 0, 0, 0, # have_{start,stop,step} * False) * elif index is None: # <<<<<<<<<<<<<< * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 */ 
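/* Note: the `index is None` branch just handled performs newaxis-style
   dimension insertion, much like numpy's arr[None]: the new axis gets
   extent 1, stride 0 (every position along it aliases the same element),
   and suboffset -1 (direct access). Only new_ndim advances; no source
   dimension is consumed. */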
goto __pyx_L6; } /* "View.MemoryView":755 * new_ndim += 1 * else: * start = index.start or 0 # <<<<<<<<<<<<<< * stop = index.stop or 0 * step = index.step or 0 */ /*else*/ { __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 755, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 755, __pyx_L1_error) if (!__pyx_t_1) { __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } else { __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 755, __pyx_L1_error) __pyx_t_10 = __pyx_t_12; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; goto __pyx_L7_bool_binop_done; } __pyx_t_10 = 0; __pyx_L7_bool_binop_done:; __pyx_v_start = __pyx_t_10; /* "View.MemoryView":756 * else: * start = index.start or 0 * stop = index.stop or 0 # <<<<<<<<<<<<<< * step = index.step or 0 * */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 756, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 756, __pyx_L1_error) if (!__pyx_t_1) { __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } else { __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 756, __pyx_L1_error) __pyx_t_10 = __pyx_t_12; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; goto __pyx_L9_bool_binop_done; } __pyx_t_10 = 0; __pyx_L9_bool_binop_done:; __pyx_v_stop = __pyx_t_10; /* "View.MemoryView":757 * start = index.start or 0 * stop = index.stop or 0 * step = index.step or 0 # <<<<<<<<<<<<<< * * have_start = index.start is not None */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 757, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 757, __pyx_L1_error) if (!__pyx_t_1) { __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } else { __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 757, __pyx_L1_error) __pyx_t_10 = __pyx_t_12; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; goto __pyx_L11_bool_binop_done; } __pyx_t_10 = 0; __pyx_L11_bool_binop_done:; __pyx_v_step = __pyx_t_10; /* "View.MemoryView":759 * step = index.step or 0 * * have_start = index.start is not None # <<<<<<<<<<<<<< * have_stop = index.stop is not None * have_step = index.step is not None */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 759, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = (__pyx_t_9 != Py_None); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_v_have_start = __pyx_t_1; /* "View.MemoryView":760 * * have_start = index.start is not None * have_stop = index.stop is not None # <<<<<<<<<<<<<< * have_step = index.step is not None * */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 760, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = (__pyx_t_9 != Py_None); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_v_have_stop = __pyx_t_1; /* "View.MemoryView":761 * have_start = index.start is not None * have_stop = index.stop is not None * have_step = index.step is not None # <<<<<<<<<<<<<< * * slice_memviewslice( */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if 
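/* Note: `start/stop/step = index.X or 0` compiles to the truth test
   performed here: a false value (None, or an explicit 0) falls through to
   the literal 0, anything else is converted with __Pyx_PyIndex_AsSsize_t.
   An explicit 0 taking the default path is harmless because the numeric
   result is identical, and the have_start/have_stop/have_step flags
   computed next record whether each bound was actually present. */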
(unlikely(!__pyx_t_9)) __PYX_ERR(1, 761, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = (__pyx_t_9 != Py_None); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_v_have_step = __pyx_t_1; /* "View.MemoryView":763 * have_step = index.step is not None * * slice_memviewslice( # <<<<<<<<<<<<<< * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], * dim, new_ndim, p_suboffset_dim, */ __pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_v_start, __pyx_v_stop, __pyx_v_step, __pyx_v_have_start, __pyx_v_have_stop, __pyx_v_have_step, 1); if (unlikely(__pyx_t_11 == ((int)-1))) __PYX_ERR(1, 763, __pyx_L1_error) /* "View.MemoryView":769 * have_start, have_stop, have_step, * True) * new_ndim += 1 # <<<<<<<<<<<<<< * * if isinstance(memview, _memoryviewslice): */ __pyx_v_new_ndim = (__pyx_v_new_ndim + 1); } __pyx_L6:; /* "View.MemoryView":741 * cdef bint have_start, have_stop, have_step * * for dim, index in enumerate(indices): # <<<<<<<<<<<<<< * if PyIndex_Check(index): * slice_memviewslice( */ } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":771 * new_ndim += 1 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * return memoryview_fromslice(dst, new_ndim, * memviewsliceobj.to_object_func, */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":772 * * if isinstance(memview, _memoryviewslice): * return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<< * memviewsliceobj.to_object_func, * memviewsliceobj.to_dtype_func, */ __Pyx_XDECREF(((PyObject *)__pyx_r)); /* "View.MemoryView":773 * if isinstance(memview, _memoryviewslice): * return memoryview_fromslice(dst, new_ndim, * memviewsliceobj.to_object_func, # <<<<<<<<<<<<<< * memviewsliceobj.to_dtype_func, * memview.dtype_is_object) */ if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); __PYX_ERR(1, 773, __pyx_L1_error) } /* "View.MemoryView":774 * return memoryview_fromslice(dst, new_ndim, * memviewsliceobj.to_object_func, * memviewsliceobj.to_dtype_func, # <<<<<<<<<<<<<< * memview.dtype_is_object) * else: */ if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); __PYX_ERR(1, 774, __pyx_L1_error) } /* "View.MemoryView":772 * * if isinstance(memview, _memoryviewslice): * return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<< * memviewsliceobj.to_object_func, * memviewsliceobj.to_dtype_func, */ __pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, __pyx_v_memviewsliceobj->to_object_func, __pyx_v_memviewsliceobj->to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 772, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) __PYX_ERR(1, 772, __pyx_L1_error) __pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":771 * new_ndim += 1 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * return memoryview_fromslice(dst, new_ndim, * memviewsliceobj.to_object_func, */ } /* "View.MemoryView":777 * memview.dtype_is_object) * else: * return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<< * memview.dtype_is_object) * */ /*else*/ { 
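/* Note: this else branch handles a plain memoryview (not a
   _memoryviewslice), which carries no custom object/dtype conversion
   callbacks, so NULL function pointers are passed and the resulting slice
   falls back to the base class's convert_item_to_object and
   assign_item_from_object paths. */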
__Pyx_XDECREF(((PyObject *)__pyx_r)); /* "View.MemoryView":778 * else: * return memoryview_fromslice(dst, new_ndim, NULL, NULL, * memview.dtype_is_object) # <<<<<<<<<<<<<< * * */ __pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, NULL, NULL, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 777, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); /* "View.MemoryView":777 * memview.dtype_is_object) * else: * return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<< * memview.dtype_is_object) * */ if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) __PYX_ERR(1, 777, __pyx_L1_error) __pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L0; } /* "View.MemoryView":705 * * @cname('__pyx_memview_slice') * cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<< * cdef int new_ndim = 0, suboffset_dim = -1, dim * cdef bint negative_step */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_9); __Pyx_AddTraceback("View.MemoryView.memview_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_memviewsliceobj); __Pyx_XDECREF(__pyx_v_index); __Pyx_XGIVEREF((PyObject *)__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":802 * * @cname('__pyx_memoryview_slice_memviewslice') * cdef int slice_memviewslice( # <<<<<<<<<<<<<< * __Pyx_memviewslice *dst, * Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset, */ static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *__pyx_v_dst, Py_ssize_t __pyx_v_shape, Py_ssize_t __pyx_v_stride, Py_ssize_t __pyx_v_suboffset, int __pyx_v_dim, int __pyx_v_new_ndim, int *__pyx_v_suboffset_dim, Py_ssize_t __pyx_v_start, Py_ssize_t __pyx_v_stop, Py_ssize_t __pyx_v_step, int __pyx_v_have_start, int __pyx_v_have_stop, int __pyx_v_have_step, int __pyx_v_is_slice) { Py_ssize_t __pyx_v_new_shape; int __pyx_v_negative_step; int __pyx_r; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; /* "View.MemoryView":822 * cdef bint negative_step * * if not is_slice: # <<<<<<<<<<<<<< * * if start < 0: */ __pyx_t_1 = ((!(__pyx_v_is_slice != 0)) != 0); if (__pyx_t_1) { /* "View.MemoryView":824 * if not is_slice: * * if start < 0: # <<<<<<<<<<<<<< * start += shape * if not 0 <= start < shape: */ __pyx_t_1 = ((__pyx_v_start < 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":825 * * if start < 0: * start += shape # <<<<<<<<<<<<<< * if not 0 <= start < shape: * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) */ __pyx_v_start = (__pyx_v_start + __pyx_v_shape); /* "View.MemoryView":824 * if not is_slice: * * if start < 0: # <<<<<<<<<<<<<< * start += shape * if not 0 <= start < shape: */ } /* "View.MemoryView":826 * if start < 0: * start += shape * if not 0 <= start < shape: # <<<<<<<<<<<<<< * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) * else: */ __pyx_t_1 = (0 <= __pyx_v_start); if (__pyx_t_1) { __pyx_t_1 = (__pyx_v_start < __pyx_v_shape); } __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":827 * start += shape * if not 0 <= start < shape: * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) # <<<<<<<<<<<<<< * else: * */ __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, ((char *)"Index out of bounds (axis %d)"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 827, __pyx_L1_error) /* "View.MemoryView":826 * if start < 0: * start += shape 
* if not 0 <= start < shape: # <<<<<<<<<<<<<< * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) * else: */ } /* "View.MemoryView":822 * cdef bint negative_step * * if not is_slice: # <<<<<<<<<<<<<< * * if start < 0: */ goto __pyx_L3; } /* "View.MemoryView":830 * else: * * negative_step = have_step != 0 and step < 0 # <<<<<<<<<<<<<< * * if have_step and step == 0: */ /*else*/ { __pyx_t_1 = ((__pyx_v_have_step != 0) != 0); if (__pyx_t_1) { } else { __pyx_t_2 = __pyx_t_1; goto __pyx_L6_bool_binop_done; } __pyx_t_1 = ((__pyx_v_step < 0) != 0); __pyx_t_2 = __pyx_t_1; __pyx_L6_bool_binop_done:; __pyx_v_negative_step = __pyx_t_2; /* "View.MemoryView":832 * negative_step = have_step != 0 and step < 0 * * if have_step and step == 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, "Step may not be zero (axis %d)", dim) * */ __pyx_t_1 = (__pyx_v_have_step != 0); if (__pyx_t_1) { } else { __pyx_t_2 = __pyx_t_1; goto __pyx_L9_bool_binop_done; } __pyx_t_1 = ((__pyx_v_step == 0) != 0); __pyx_t_2 = __pyx_t_1; __pyx_L9_bool_binop_done:; if (__pyx_t_2) { /* "View.MemoryView":833 * * if have_step and step == 0: * _err_dim(ValueError, "Step may not be zero (axis %d)", dim) # <<<<<<<<<<<<<< * * */ __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, ((char *)"Step may not be zero (axis %d)"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 833, __pyx_L1_error) /* "View.MemoryView":832 * negative_step = have_step != 0 and step < 0 * * if have_step and step == 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, "Step may not be zero (axis %d)", dim) * */ } /* "View.MemoryView":836 * * * if have_start: # <<<<<<<<<<<<<< * if start < 0: * start += shape */ __pyx_t_2 = (__pyx_v_have_start != 0); if (__pyx_t_2) { /* "View.MemoryView":837 * * if have_start: * if start < 0: # <<<<<<<<<<<<<< * start += shape * if start < 0: */ __pyx_t_2 = ((__pyx_v_start < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":838 * if have_start: * if start < 0: * start += shape # <<<<<<<<<<<<<< * if start < 0: * start = 0 */ __pyx_v_start = (__pyx_v_start + __pyx_v_shape); /* "View.MemoryView":839 * if start < 0: * start += shape * if start < 0: # <<<<<<<<<<<<<< * start = 0 * elif start >= shape: */ __pyx_t_2 = ((__pyx_v_start < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":840 * start += shape * if start < 0: * start = 0 # <<<<<<<<<<<<<< * elif start >= shape: * if negative_step: */ __pyx_v_start = 0; /* "View.MemoryView":839 * if start < 0: * start += shape * if start < 0: # <<<<<<<<<<<<<< * start = 0 * elif start >= shape: */ } /* "View.MemoryView":837 * * if have_start: * if start < 0: # <<<<<<<<<<<<<< * start += shape * if start < 0: */ goto __pyx_L12; } /* "View.MemoryView":841 * if start < 0: * start = 0 * elif start >= shape: # <<<<<<<<<<<<<< * if negative_step: * start = shape - 1 */ __pyx_t_2 = ((__pyx_v_start >= __pyx_v_shape) != 0); if (__pyx_t_2) { /* "View.MemoryView":842 * start = 0 * elif start >= shape: * if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ __pyx_t_2 = (__pyx_v_negative_step != 0); if (__pyx_t_2) { /* "View.MemoryView":843 * elif start >= shape: * if negative_step: * start = shape - 1 # <<<<<<<<<<<<<< * else: * start = shape */ __pyx_v_start = (__pyx_v_shape - 1); /* "View.MemoryView":842 * start = 0 * elif start >= shape: * if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ goto __pyx_L14; } /* "View.MemoryView":845 * start = shape - 1 * else: * start = shape # <<<<<<<<<<<<<< * else: * if negative_step: */ /*else*/ { __pyx_v_start = __pyx_v_shape; } 
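/* Note: the branches above implement Python's slice clamping for `start`.
   With shape == 5: an explicit start of 7 clamps to 5 (an empty slice for
   a positive step) or to 4 for a negative step; start == -7 wraps to -2
   and then clamps to 0; an omitted start defaults to shape - 1 when
   stepping backwards (as in mv[::-1]) and to 0 otherwise. `stop` and
   `step` receive the analogous treatment below. */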
__pyx_L14:; /* "View.MemoryView":841 * if start < 0: * start = 0 * elif start >= shape: # <<<<<<<<<<<<<< * if negative_step: * start = shape - 1 */ } __pyx_L12:; /* "View.MemoryView":836 * * * if have_start: # <<<<<<<<<<<<<< * if start < 0: * start += shape */ goto __pyx_L11; } /* "View.MemoryView":847 * start = shape * else: * if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ /*else*/ { __pyx_t_2 = (__pyx_v_negative_step != 0); if (__pyx_t_2) { /* "View.MemoryView":848 * else: * if negative_step: * start = shape - 1 # <<<<<<<<<<<<<< * else: * start = 0 */ __pyx_v_start = (__pyx_v_shape - 1); /* "View.MemoryView":847 * start = shape * else: * if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ goto __pyx_L15; } /* "View.MemoryView":850 * start = shape - 1 * else: * start = 0 # <<<<<<<<<<<<<< * * if have_stop: */ /*else*/ { __pyx_v_start = 0; } __pyx_L15:; } __pyx_L11:; /* "View.MemoryView":852 * start = 0 * * if have_stop: # <<<<<<<<<<<<<< * if stop < 0: * stop += shape */ __pyx_t_2 = (__pyx_v_have_stop != 0); if (__pyx_t_2) { /* "View.MemoryView":853 * * if have_stop: * if stop < 0: # <<<<<<<<<<<<<< * stop += shape * if stop < 0: */ __pyx_t_2 = ((__pyx_v_stop < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":854 * if have_stop: * if stop < 0: * stop += shape # <<<<<<<<<<<<<< * if stop < 0: * stop = 0 */ __pyx_v_stop = (__pyx_v_stop + __pyx_v_shape); /* "View.MemoryView":855 * if stop < 0: * stop += shape * if stop < 0: # <<<<<<<<<<<<<< * stop = 0 * elif stop > shape: */ __pyx_t_2 = ((__pyx_v_stop < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":856 * stop += shape * if stop < 0: * stop = 0 # <<<<<<<<<<<<<< * elif stop > shape: * stop = shape */ __pyx_v_stop = 0; /* "View.MemoryView":855 * if stop < 0: * stop += shape * if stop < 0: # <<<<<<<<<<<<<< * stop = 0 * elif stop > shape: */ } /* "View.MemoryView":853 * * if have_stop: * if stop < 0: # <<<<<<<<<<<<<< * stop += shape * if stop < 0: */ goto __pyx_L17; } /* "View.MemoryView":857 * if stop < 0: * stop = 0 * elif stop > shape: # <<<<<<<<<<<<<< * stop = shape * else: */ __pyx_t_2 = ((__pyx_v_stop > __pyx_v_shape) != 0); if (__pyx_t_2) { /* "View.MemoryView":858 * stop = 0 * elif stop > shape: * stop = shape # <<<<<<<<<<<<<< * else: * if negative_step: */ __pyx_v_stop = __pyx_v_shape; /* "View.MemoryView":857 * if stop < 0: * stop = 0 * elif stop > shape: # <<<<<<<<<<<<<< * stop = shape * else: */ } __pyx_L17:; /* "View.MemoryView":852 * start = 0 * * if have_stop: # <<<<<<<<<<<<<< * if stop < 0: * stop += shape */ goto __pyx_L16; } /* "View.MemoryView":860 * stop = shape * else: * if negative_step: # <<<<<<<<<<<<<< * stop = -1 * else: */ /*else*/ { __pyx_t_2 = (__pyx_v_negative_step != 0); if (__pyx_t_2) { /* "View.MemoryView":861 * else: * if negative_step: * stop = -1 # <<<<<<<<<<<<<< * else: * stop = shape */ __pyx_v_stop = -1L; /* "View.MemoryView":860 * stop = shape * else: * if negative_step: # <<<<<<<<<<<<<< * stop = -1 * else: */ goto __pyx_L19; } /* "View.MemoryView":863 * stop = -1 * else: * stop = shape # <<<<<<<<<<<<<< * * if not have_step: */ /*else*/ { __pyx_v_stop = __pyx_v_shape; } __pyx_L19:; } __pyx_L16:; /* "View.MemoryView":865 * stop = shape * * if not have_step: # <<<<<<<<<<<<<< * step = 1 * */ __pyx_t_2 = ((!(__pyx_v_have_step != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":866 * * if not have_step: * step = 1 # <<<<<<<<<<<<<< * * */ __pyx_v_step = 1; /* "View.MemoryView":865 * stop = shape * * if not have_step: # <<<<<<<<<<<<<< * step = 1 * */ } /* "View.MemoryView":870 * * with 
cython.cdivision(True): * new_shape = (stop - start) // step # <<<<<<<<<<<<<< * * if (stop - start) - step * new_shape: */ __pyx_v_new_shape = ((__pyx_v_stop - __pyx_v_start) / __pyx_v_step); /* "View.MemoryView":872 * new_shape = (stop - start) // step * * if (stop - start) - step * new_shape: # <<<<<<<<<<<<<< * new_shape += 1 * */ __pyx_t_2 = (((__pyx_v_stop - __pyx_v_start) - (__pyx_v_step * __pyx_v_new_shape)) != 0); if (__pyx_t_2) { /* "View.MemoryView":873 * * if (stop - start) - step * new_shape: * new_shape += 1 # <<<<<<<<<<<<<< * * if new_shape < 0: */ __pyx_v_new_shape = (__pyx_v_new_shape + 1); /* "View.MemoryView":872 * new_shape = (stop - start) // step * * if (stop - start) - step * new_shape: # <<<<<<<<<<<<<< * new_shape += 1 * */ } /* "View.MemoryView":875 * new_shape += 1 * * if new_shape < 0: # <<<<<<<<<<<<<< * new_shape = 0 * */ __pyx_t_2 = ((__pyx_v_new_shape < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":876 * * if new_shape < 0: * new_shape = 0 # <<<<<<<<<<<<<< * * */ __pyx_v_new_shape = 0; /* "View.MemoryView":875 * new_shape += 1 * * if new_shape < 0: # <<<<<<<<<<<<<< * new_shape = 0 * */ } /* "View.MemoryView":879 * * * dst.strides[new_ndim] = stride * step # <<<<<<<<<<<<<< * dst.shape[new_ndim] = new_shape * dst.suboffsets[new_ndim] = suboffset */ (__pyx_v_dst->strides[__pyx_v_new_ndim]) = (__pyx_v_stride * __pyx_v_step); /* "View.MemoryView":880 * * dst.strides[new_ndim] = stride * step * dst.shape[new_ndim] = new_shape # <<<<<<<<<<<<<< * dst.suboffsets[new_ndim] = suboffset * */ (__pyx_v_dst->shape[__pyx_v_new_ndim]) = __pyx_v_new_shape; /* "View.MemoryView":881 * dst.strides[new_ndim] = stride * step * dst.shape[new_ndim] = new_shape * dst.suboffsets[new_ndim] = suboffset # <<<<<<<<<<<<<< * * */ (__pyx_v_dst->suboffsets[__pyx_v_new_ndim]) = __pyx_v_suboffset; } __pyx_L3:; /* "View.MemoryView":884 * * * if suboffset_dim[0] < 0: # <<<<<<<<<<<<<< * dst.data += start * stride * else: */ __pyx_t_2 = (((__pyx_v_suboffset_dim[0]) < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":885 * * if suboffset_dim[0] < 0: * dst.data += start * stride # <<<<<<<<<<<<<< * else: * dst.suboffsets[suboffset_dim[0]] += start * stride */ __pyx_v_dst->data = (__pyx_v_dst->data + (__pyx_v_start * __pyx_v_stride)); /* "View.MemoryView":884 * * * if suboffset_dim[0] < 0: # <<<<<<<<<<<<<< * dst.data += start * stride * else: */ goto __pyx_L23; } /* "View.MemoryView":887 * dst.data += start * stride * else: * dst.suboffsets[suboffset_dim[0]] += start * stride # <<<<<<<<<<<<<< * * if suboffset >= 0: */ /*else*/ { __pyx_t_3 = (__pyx_v_suboffset_dim[0]); (__pyx_v_dst->suboffsets[__pyx_t_3]) = ((__pyx_v_dst->suboffsets[__pyx_t_3]) + (__pyx_v_start * __pyx_v_stride)); } __pyx_L23:; /* "View.MemoryView":889 * dst.suboffsets[suboffset_dim[0]] += start * stride * * if suboffset >= 0: # <<<<<<<<<<<<<< * if not is_slice: * if new_ndim == 0: */ __pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":890 * * if suboffset >= 0: * if not is_slice: # <<<<<<<<<<<<<< * if new_ndim == 0: * dst.data = (<char **> dst.data)[0] + suboffset */ __pyx_t_2 = ((!(__pyx_v_is_slice != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":891 * if suboffset >= 0: * if not is_slice: * if new_ndim == 0: # <<<<<<<<<<<<<< * dst.data = (<char **> dst.data)[0] + suboffset * else: */ __pyx_t_2 = ((__pyx_v_new_ndim == 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":892 * if not is_slice: * if new_ndim == 0: * dst.data = (<char **> dst.data)[0] + suboffset # <<<<<<<<<<<<<< * else: * _err_dim(IndexError, 
"All dimensions preceding dimension %d " */ __pyx_v_dst->data = ((((char **)__pyx_v_dst->data)[0]) + __pyx_v_suboffset); /* "View.MemoryView":891 * if suboffset >= 0: * if not is_slice: * if new_ndim == 0: # <<<<<<<<<<<<<< * dst.data = (<char **> dst.data)[0] + suboffset * else: */ goto __pyx_L26; } /* "View.MemoryView":894 * dst.data = (<char **> dst.data)[0] + suboffset * else: * _err_dim(IndexError, "All dimensions preceding dimension %d " # <<<<<<<<<<<<<< * "must be indexed and not sliced", dim) * else: */ /*else*/ { /* "View.MemoryView":895 * else: * _err_dim(IndexError, "All dimensions preceding dimension %d " * "must be indexed and not sliced", dim) # <<<<<<<<<<<<<< * else: * suboffset_dim[0] = new_ndim */ __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, ((char *)"All dimensions preceding dimension %d must be indexed and not sliced"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 894, __pyx_L1_error) } __pyx_L26:; /* "View.MemoryView":890 * * if suboffset >= 0: * if not is_slice: # <<<<<<<<<<<<<< * if new_ndim == 0: * dst.data = (<char **> dst.data)[0] + suboffset */ goto __pyx_L25; } /* "View.MemoryView":897 * "must be indexed and not sliced", dim) * else: * suboffset_dim[0] = new_ndim # <<<<<<<<<<<<<< * * return 0 */ /*else*/ { (__pyx_v_suboffset_dim[0]) = __pyx_v_new_ndim; } __pyx_L25:; /* "View.MemoryView":889 * dst.suboffsets[suboffset_dim[0]] += start * stride * * if suboffset >= 0: # <<<<<<<<<<<<<< * if not is_slice: * if new_ndim == 0: */ } /* "View.MemoryView":899 * suboffset_dim[0] = new_ndim * * return 0 # <<<<<<<<<<<<<< * * */ __pyx_r = 0; goto __pyx_L0; /* "View.MemoryView":802 * * @cname('__pyx_memoryview_slice_memviewslice') * cdef int slice_memviewslice( # <<<<<<<<<<<<<< * __Pyx_memviewslice *dst, * Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset, */ /* function exit code */ __pyx_L1_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_AddTraceback("View.MemoryView.slice_memviewslice", __pyx_clineno, __pyx_lineno, __pyx_filename); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_r = -1; __pyx_L0:; return __pyx_r; } /* "View.MemoryView":905 * * @cname('__pyx_pybuffer_index') * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<< * Py_ssize_t dim) except NULL: * cdef Py_ssize_t shape, stride, suboffset = -1 */ static char *__pyx_pybuffer_index(Py_buffer *__pyx_v_view, char *__pyx_v_bufp, Py_ssize_t __pyx_v_index, Py_ssize_t __pyx_v_dim) { Py_ssize_t __pyx_v_shape; Py_ssize_t __pyx_v_stride; Py_ssize_t __pyx_v_suboffset; Py_ssize_t __pyx_v_itemsize; char *__pyx_v_resultp; char *__pyx_r; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; __Pyx_RefNannySetupContext("pybuffer_index", 0); /* "View.MemoryView":907 * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, * Py_ssize_t dim) except NULL: * cdef Py_ssize_t shape, stride, suboffset = -1 # <<<<<<<<<<<<<< * cdef Py_ssize_t itemsize = view.itemsize * cdef char *resultp */ __pyx_v_suboffset = -1L; /* "View.MemoryView":908 * Py_ssize_t dim) except NULL: * cdef Py_ssize_t shape, stride, suboffset = -1 * cdef Py_ssize_t itemsize = view.itemsize # <<<<<<<<<<<<<< * cdef char *resultp * */ __pyx_t_1 = __pyx_v_view->itemsize; __pyx_v_itemsize = __pyx_t_1; /* "View.MemoryView":911 * cdef char *resultp * * if view.ndim == 0: # <<<<<<<<<<<<<< * shape = view.len / itemsize * 
stride = itemsize */ __pyx_t_2 = ((__pyx_v_view->ndim == 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":912 * * if view.ndim == 0: * shape = view.len / itemsize # <<<<<<<<<<<<<< * stride = itemsize * else: */ if (unlikely(__pyx_v_itemsize == 0)) { PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); __PYX_ERR(1, 912, __pyx_L1_error) } else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && unlikely(__pyx_v_itemsize == (Py_ssize_t)-1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_view->len))) { PyErr_SetString(PyExc_OverflowError, "value too large to perform division"); __PYX_ERR(1, 912, __pyx_L1_error) } __pyx_v_shape = __Pyx_div_Py_ssize_t(__pyx_v_view->len, __pyx_v_itemsize); /* "View.MemoryView":913 * if view.ndim == 0: * shape = view.len / itemsize * stride = itemsize # <<<<<<<<<<<<<< * else: * shape = view.shape[dim] */ __pyx_v_stride = __pyx_v_itemsize; /* "View.MemoryView":911 * cdef char *resultp * * if view.ndim == 0: # <<<<<<<<<<<<<< * shape = view.len / itemsize * stride = itemsize */ goto __pyx_L3; } /* "View.MemoryView":915 * stride = itemsize * else: * shape = view.shape[dim] # <<<<<<<<<<<<<< * stride = view.strides[dim] * if view.suboffsets != NULL: */ /*else*/ { __pyx_v_shape = (__pyx_v_view->shape[__pyx_v_dim]); /* "View.MemoryView":916 * else: * shape = view.shape[dim] * stride = view.strides[dim] # <<<<<<<<<<<<<< * if view.suboffsets != NULL: * suboffset = view.suboffsets[dim] */ __pyx_v_stride = (__pyx_v_view->strides[__pyx_v_dim]); /* "View.MemoryView":917 * shape = view.shape[dim] * stride = view.strides[dim] * if view.suboffsets != NULL: # <<<<<<<<<<<<<< * suboffset = view.suboffsets[dim] * */ __pyx_t_2 = ((__pyx_v_view->suboffsets != NULL) != 0); if (__pyx_t_2) { /* "View.MemoryView":918 * stride = view.strides[dim] * if view.suboffsets != NULL: * suboffset = view.suboffsets[dim] # <<<<<<<<<<<<<< * * if index < 0: */ __pyx_v_suboffset = (__pyx_v_view->suboffsets[__pyx_v_dim]); /* "View.MemoryView":917 * shape = view.shape[dim] * stride = view.strides[dim] * if view.suboffsets != NULL: # <<<<<<<<<<<<<< * suboffset = view.suboffsets[dim] * */ } } __pyx_L3:; /* "View.MemoryView":920 * suboffset = view.suboffsets[dim] * * if index < 0: # <<<<<<<<<<<<<< * index += view.shape[dim] * if index < 0: */ __pyx_t_2 = ((__pyx_v_index < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":921 * * if index < 0: * index += view.shape[dim] # <<<<<<<<<<<<<< * if index < 0: * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) */ __pyx_v_index = (__pyx_v_index + (__pyx_v_view->shape[__pyx_v_dim])); /* "View.MemoryView":922 * if index < 0: * index += view.shape[dim] * if index < 0: # <<<<<<<<<<<<<< * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * */ __pyx_t_2 = ((__pyx_v_index < 0) != 0); if (unlikely(__pyx_t_2)) { /* "View.MemoryView":923 * index += view.shape[dim] * if index < 0: * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) # <<<<<<<<<<<<<< * * if index >= shape: */ __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 923, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 923, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_builtin_IndexError, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 923, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); 
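/* Note: pybuffer_index wraps a negative index exactly once
   (index += shape) and raises IndexError both here, if the index is still
   negative, and below, if index >= shape; the "Out of bounds on buffer
   access (axis %d)" message is built with PyString formatting. Only after
   both checks pass does the function do pointer arithmetic on the buffer. */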
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 923, __pyx_L1_error) /* "View.MemoryView":922 * if index < 0: * index += view.shape[dim] * if index < 0: # <<<<<<<<<<<<<< * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * */ } /* "View.MemoryView":920 * suboffset = view.suboffsets[dim] * * if index < 0: # <<<<<<<<<<<<<< * index += view.shape[dim] * if index < 0: */ } /* "View.MemoryView":925 * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * * if index >= shape: # <<<<<<<<<<<<<< * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * */ __pyx_t_2 = ((__pyx_v_index >= __pyx_v_shape) != 0); if (unlikely(__pyx_t_2)) { /* "View.MemoryView":926 * * if index >= shape: * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) # <<<<<<<<<<<<<< * * resultp = bufp + index * stride */ __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 926, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 926, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_builtin_IndexError, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 926, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 926, __pyx_L1_error) /* "View.MemoryView":925 * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * * if index >= shape: # <<<<<<<<<<<<<< * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * */ } /* "View.MemoryView":928 * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * * resultp = bufp + index * stride # <<<<<<<<<<<<<< * if suboffset >= 0: * resultp = (<char **> resultp)[0] + suboffset */ __pyx_v_resultp = (__pyx_v_bufp + (__pyx_v_index * __pyx_v_stride)); /* "View.MemoryView":929 * * resultp = bufp + index * stride * if suboffset >= 0: # <<<<<<<<<<<<<< * resultp = (<char **> resultp)[0] + suboffset * */ __pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":930 * resultp = bufp + index * stride * if suboffset >= 0: * resultp = (<char **> resultp)[0] + suboffset # <<<<<<<<<<<<<< * * return resultp */ __pyx_v_resultp = ((((char **)__pyx_v_resultp)[0]) + __pyx_v_suboffset); /* "View.MemoryView":929 * * resultp = bufp + index * stride * if suboffset >= 0: # <<<<<<<<<<<<<< * resultp = (<char **> resultp)[0] + suboffset * */ } /* "View.MemoryView":932 * resultp = (<char **> resultp)[0] + suboffset * * return resultp # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_resultp; goto __pyx_L0; /* "View.MemoryView":905 * * @cname('__pyx_pybuffer_index') * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<< * Py_ssize_t dim) except NULL: * cdef Py_ssize_t shape, stride, suboffset = -1 */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("View.MemoryView.pybuffer_index", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":938 * * @cname('__pyx_memslice_transpose') * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: # <<<<<<<<<<<<<< * cdef int ndim = memslice.memview.view.ndim * */ 
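/* Illustrative sketch (not emitted by Cython): transpose_memslice below
   reverses the axis order of a slice purely by swapping shape/strides
   pairs; the data pointer never moves. The standalone helper that follows
   is a minimal illustration of that core swap, under a hypothetical name;
   CYTHON_UNUSED, defined earlier in this generated file, silences
   unused-function warnings. For a C-contiguous 2-D slice with shape
   {R, C} and strides {C * itemsize, itemsize}, the swap yields shape
   {C, R} and strides {itemsize, C * itemsize}: a Fortran-ordered view of
   the same buffer. transpose_memslice additionally rejects indirect
   dimensions (suboffset >= 0), which this sketch omits. */
static CYTHON_UNUSED void __pyx_editor_reverse_axes(Py_ssize_t *shape,
                                                    Py_ssize_t *strides,
                                                    int ndim)
{
    int i;
    for (i = 0; i < ndim / 2; i++) {
        int j = ndim - 1 - i;      /* pair axis i with its mirror image j */
        Py_ssize_t tmp;
        tmp = strides[i]; strides[i] = strides[j]; strides[j] = tmp; /* swap strides */
        tmp = shape[i];   shape[i]   = shape[j];   shape[j]   = tmp; /* swap extents */
    }
}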
static int __pyx_memslice_transpose(__Pyx_memviewslice *__pyx_v_memslice) { int __pyx_v_ndim; Py_ssize_t *__pyx_v_shape; Py_ssize_t *__pyx_v_strides; int __pyx_v_i; int __pyx_v_j; int __pyx_r; int __pyx_t_1; Py_ssize_t *__pyx_t_2; long __pyx_t_3; long __pyx_t_4; Py_ssize_t __pyx_t_5; Py_ssize_t __pyx_t_6; int __pyx_t_7; int __pyx_t_8; int __pyx_t_9; /* "View.MemoryView":939 * @cname('__pyx_memslice_transpose') * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: * cdef int ndim = memslice.memview.view.ndim # <<<<<<<<<<<<<< * * cdef Py_ssize_t *shape = memslice.shape */ __pyx_t_1 = __pyx_v_memslice->memview->view.ndim; __pyx_v_ndim = __pyx_t_1; /* "View.MemoryView":941 * cdef int ndim = memslice.memview.view.ndim * * cdef Py_ssize_t *shape = memslice.shape # <<<<<<<<<<<<<< * cdef Py_ssize_t *strides = memslice.strides * */ __pyx_t_2 = __pyx_v_memslice->shape; __pyx_v_shape = __pyx_t_2; /* "View.MemoryView":942 * * cdef Py_ssize_t *shape = memslice.shape * cdef Py_ssize_t *strides = memslice.strides # <<<<<<<<<<<<<< * * */ __pyx_t_2 = __pyx_v_memslice->strides; __pyx_v_strides = __pyx_t_2; /* "View.MemoryView":946 * * cdef int i, j * for i in range(ndim / 2): # <<<<<<<<<<<<<< * j = ndim - 1 - i * strides[i], strides[j] = strides[j], strides[i] */ __pyx_t_3 = __Pyx_div_long(__pyx_v_ndim, 2); __pyx_t_4 = __pyx_t_3; for (__pyx_t_1 = 0; __pyx_t_1 < __pyx_t_4; __pyx_t_1+=1) { __pyx_v_i = __pyx_t_1; /* "View.MemoryView":947 * cdef int i, j * for i in range(ndim / 2): * j = ndim - 1 - i # <<<<<<<<<<<<<< * strides[i], strides[j] = strides[j], strides[i] * shape[i], shape[j] = shape[j], shape[i] */ __pyx_v_j = ((__pyx_v_ndim - 1) - __pyx_v_i); /* "View.MemoryView":948 * for i in range(ndim / 2): * j = ndim - 1 - i * strides[i], strides[j] = strides[j], strides[i] # <<<<<<<<<<<<<< * shape[i], shape[j] = shape[j], shape[i] * */ __pyx_t_5 = (__pyx_v_strides[__pyx_v_j]); __pyx_t_6 = (__pyx_v_strides[__pyx_v_i]); (__pyx_v_strides[__pyx_v_i]) = __pyx_t_5; (__pyx_v_strides[__pyx_v_j]) = __pyx_t_6; /* "View.MemoryView":949 * j = ndim - 1 - i * strides[i], strides[j] = strides[j], strides[i] * shape[i], shape[j] = shape[j], shape[i] # <<<<<<<<<<<<<< * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: */ __pyx_t_6 = (__pyx_v_shape[__pyx_v_j]); __pyx_t_5 = (__pyx_v_shape[__pyx_v_i]); (__pyx_v_shape[__pyx_v_i]) = __pyx_t_6; (__pyx_v_shape[__pyx_v_j]) = __pyx_t_5; /* "View.MemoryView":951 * shape[i], shape[j] = shape[j], shape[i] * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: # <<<<<<<<<<<<<< * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") * */ __pyx_t_8 = (((__pyx_v_memslice->suboffsets[__pyx_v_i]) >= 0) != 0); if (!__pyx_t_8) { } else { __pyx_t_7 = __pyx_t_8; goto __pyx_L6_bool_binop_done; } __pyx_t_8 = (((__pyx_v_memslice->suboffsets[__pyx_v_j]) >= 0) != 0); __pyx_t_7 = __pyx_t_8; __pyx_L6_bool_binop_done:; if (__pyx_t_7) { /* "View.MemoryView":952 * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") # <<<<<<<<<<<<<< * * return 1 */ __pyx_t_9 = __pyx_memoryview_err(__pyx_builtin_ValueError, ((char *)"Cannot transpose memoryview with indirect dimensions")); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 952, __pyx_L1_error) /* "View.MemoryView":951 * shape[i], shape[j] = shape[j], shape[i] * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: # <<<<<<<<<<<<<< * _err(ValueError, "Cannot transpose memoryview with indirect 
dimensions") * */ } } /* "View.MemoryView":954 * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") * * return 1 # <<<<<<<<<<<<<< * * */ __pyx_r = 1; goto __pyx_L0; /* "View.MemoryView":938 * * @cname('__pyx_memslice_transpose') * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: # <<<<<<<<<<<<<< * cdef int ndim = memslice.memview.view.ndim * */ /* function exit code */ __pyx_L1_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_AddTraceback("View.MemoryView.transpose_memslice", __pyx_clineno, __pyx_lineno, __pyx_filename); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_r = 0; __pyx_L0:; return __pyx_r; } /* "View.MemoryView":971 * cdef int (*to_dtype_func)(char *, object) except 0 * * def __dealloc__(self): # <<<<<<<<<<<<<< * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * */ /* Python wrapper */ static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self); /*proto*/ static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__", 0); /* "View.MemoryView":972 * * def __dealloc__(self): * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) # <<<<<<<<<<<<<< * * cdef convert_item_to_object(self, char *itemp): */ __PYX_XDEC_MEMVIEW((&__pyx_v_self->from_slice), 1); /* "View.MemoryView":971 * cdef int (*to_dtype_func)(char *, object) except 0 * * def __dealloc__(self): # <<<<<<<<<<<<<< * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":974 * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * if self.to_object_func != NULL: * return self.to_object_func(itemp) */ static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("convert_item_to_object", 0); /* "View.MemoryView":975 * * cdef convert_item_to_object(self, char *itemp): * if self.to_object_func != NULL: # <<<<<<<<<<<<<< * return self.to_object_func(itemp) * else: */ __pyx_t_1 = ((__pyx_v_self->to_object_func != NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":976 * cdef convert_item_to_object(self, char *itemp): * if self.to_object_func != NULL: * return self.to_object_func(itemp) # <<<<<<<<<<<<<< * else: * return memoryview.convert_item_to_object(self, itemp) */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_v_self->to_object_func(__pyx_v_itemp); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 976, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":975 * * cdef convert_item_to_object(self, char *itemp): * if self.to_object_func != NULL: # <<<<<<<<<<<<<< * return self.to_object_func(itemp) * else: */ } /* "View.MemoryView":978 * return self.to_object_func(itemp) * else: * return memoryview.convert_item_to_object(self, itemp) # <<<<<<<<<<<<<< * * cdef 
assign_item_from_object(self, char *itemp, object value): */ /*else*/ { __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_memoryview_convert_item_to_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 978, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; } /* "View.MemoryView":974 * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * if self.to_object_func != NULL: * return self.to_object_func(itemp) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView._memoryviewslice.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":980 * return memoryview.convert_item_to_object(self, itemp) * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * if self.to_dtype_func != NULL: * self.to_dtype_func(itemp, value) */ static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; __Pyx_RefNannySetupContext("assign_item_from_object", 0); /* "View.MemoryView":981 * * cdef assign_item_from_object(self, char *itemp, object value): * if self.to_dtype_func != NULL: # <<<<<<<<<<<<<< * self.to_dtype_func(itemp, value) * else: */ __pyx_t_1 = ((__pyx_v_self->to_dtype_func != NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":982 * cdef assign_item_from_object(self, char *itemp, object value): * if self.to_dtype_func != NULL: * self.to_dtype_func(itemp, value) # <<<<<<<<<<<<<< * else: * memoryview.assign_item_from_object(self, itemp, value) */ __pyx_t_2 = __pyx_v_self->to_dtype_func(__pyx_v_itemp, __pyx_v_value); if (unlikely(__pyx_t_2 == ((int)0))) __PYX_ERR(1, 982, __pyx_L1_error) /* "View.MemoryView":981 * * cdef assign_item_from_object(self, char *itemp, object value): * if self.to_dtype_func != NULL: # <<<<<<<<<<<<<< * self.to_dtype_func(itemp, value) * else: */ goto __pyx_L3; } /* "View.MemoryView":984 * self.to_dtype_func(itemp, value) * else: * memoryview.assign_item_from_object(self, itemp, value) # <<<<<<<<<<<<<< * * @property */ /*else*/ { __pyx_t_3 = __pyx_memoryview_assign_item_from_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 984, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } __pyx_L3:; /* "View.MemoryView":980 * return memoryview.convert_item_to_object(self, itemp) * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * if self.to_dtype_func != NULL: * self.to_dtype_func(itemp, value) */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView._memoryviewslice.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":987 * * @property * def base(self): # <<<<<<<<<<<<<< * return self.from_object * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject 
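/* Note: the `base` property below simply returns from_object, the Python
   object the original memoryview was created over. Because
   memoryview_fromslice propagates (<memoryview> memviewslice.memview).base
   into each new slice, repeatedly slicing a view keeps reporting the
   original exporter through .base rather than the intermediate views. */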
*__pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":988 * @property * def base(self): * return self.from_object # <<<<<<<<<<<<<< * * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_self->from_object); __pyx_r = __pyx_v_self->from_object; goto __pyx_L0; /* "View.MemoryView":987 * * @property * def base(self): # <<<<<<<<<<<<<< * return self.from_object * */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_memoryviewslice_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_pw___pyx_memoryviewslice_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_memoryviewslice___reduce_cython__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_memoryviewslice___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__24, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 2, __pyx_L1_error) /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView._memoryviewslice.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_memoryviewslice_3__setstate_cython__(PyObject 
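/* Note: __reduce_cython__ and __setstate_cython__ both raise TypeError
   ("no default __reduce__ due to non-trivial __cinit__"), so
   _memoryviewslice objects are deliberately not picklable; a slice must be
   re-created from its exporting object instead. */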
*__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ static PyObject *__pyx_pw___pyx_memoryviewslice_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_memoryviewslice_2__setstate_cython__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_memoryviewslice_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__25, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 4, __pyx_L1_error) /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView._memoryviewslice.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":994 * * @cname('__pyx_memoryview_fromslice') * cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<< * int ndim, * object (*to_object_func)(char *), */ static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice __pyx_v_memviewslice, int __pyx_v_ndim, PyObject *(*__pyx_v_to_object_func)(char *), int (*__pyx_v_to_dtype_func)(char *, PyObject *), int __pyx_v_dtype_is_object) { struct __pyx_memoryviewslice_obj *__pyx_v_result = 0; Py_ssize_t __pyx_v_suboffset; PyObject *__pyx_v_length = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; __Pyx_TypeInfo *__pyx_t_4; Py_buffer __pyx_t_5; Py_ssize_t *__pyx_t_6; Py_ssize_t *__pyx_t_7; Py_ssize_t *__pyx_t_8; Py_ssize_t __pyx_t_9; __Pyx_RefNannySetupContext("memoryview_fromslice", 0); /* "View.MemoryView":1002 * cdef _memoryviewslice result * * if <PyObject *> memviewslice.memview == Py_None: # <<<<<<<<<<<<<< * return None * */ __pyx_t_1 = ((((PyObject *)__pyx_v_memviewslice.memview) == Py_None) != 0); if (__pyx_t_1) { /* "View.MemoryView":1003 * * if <PyObject *> memviewslice.memview == Py_None: * return None # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; /* "View.MemoryView":1002 * cdef _memoryviewslice result * * if <PyObject *> memviewslice.memview == Py_None: # <<<<<<<<<<<<<< * return None * */ } /* "View.MemoryView":1008 * * * result = _memoryviewslice(None, 0, dtype_is_object) # <<<<<<<<<<<<<< * * result.from_slice = memviewslice */ __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) 
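/* Note: memoryview_fromslice materializes a Python-level _memoryviewslice
   from the C-level __Pyx_memviewslice: it calls the type with the argument
   tuple (None, 0, dtype_is_object) built here, stores the slice in
   from_slice, takes an extra reference on it via __PYX_INC_MEMVIEW, and
   then patches the copied Py_buffer (buf, ndim, obj, shape, strides,
   suboffsets, len, flags) so the new object describes the sliced data. */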
__PYX_ERR(1, 1008, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1008, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); PyTuple_SET_ITEM(__pyx_t_3, 0, Py_None); __Pyx_INCREF(__pyx_int_0); __Pyx_GIVEREF(__pyx_int_0); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_0); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryviewslice_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1008, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":1010 * result = _memoryviewslice(None, 0, dtype_is_object) * * result.from_slice = memviewslice # <<<<<<<<<<<<<< * __PYX_INC_MEMVIEW(&memviewslice, 1) * */ __pyx_v_result->from_slice = __pyx_v_memviewslice; /* "View.MemoryView":1011 * * result.from_slice = memviewslice * __PYX_INC_MEMVIEW(&memviewslice, 1) # <<<<<<<<<<<<<< * * result.from_object = (<memoryview> memviewslice.memview).base */ __PYX_INC_MEMVIEW((&__pyx_v_memviewslice), 1); /* "View.MemoryView":1013 * __PYX_INC_MEMVIEW(&memviewslice, 1) * * result.from_object = (<memoryview> memviewslice.memview).base # <<<<<<<<<<<<<< * result.typeinfo = memviewslice.memview.typeinfo * */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_memviewslice.memview), __pyx_n_s_base); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1013, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); __Pyx_GOTREF(__pyx_v_result->from_object); __Pyx_DECREF(__pyx_v_result->from_object); __pyx_v_result->from_object = __pyx_t_2; __pyx_t_2 = 0; /* "View.MemoryView":1014 * * result.from_object = (<memoryview> memviewslice.memview).base * result.typeinfo = memviewslice.memview.typeinfo # <<<<<<<<<<<<<< * * result.view = memviewslice.memview.view */ __pyx_t_4 = __pyx_v_memviewslice.memview->typeinfo; __pyx_v_result->__pyx_base.typeinfo = __pyx_t_4; /* "View.MemoryView":1016 * result.typeinfo = memviewslice.memview.typeinfo * * result.view = memviewslice.memview.view # <<<<<<<<<<<<<< * result.view.buf = <void *> memviewslice.data * result.view.ndim = ndim */ __pyx_t_5 = __pyx_v_memviewslice.memview->view; __pyx_v_result->__pyx_base.view = __pyx_t_5; /* "View.MemoryView":1017 * * result.view = memviewslice.memview.view * result.view.buf = <void *> memviewslice.data # <<<<<<<<<<<<<< * result.view.ndim = ndim * (<__pyx_buffer *> &result.view).obj = Py_None */ __pyx_v_result->__pyx_base.view.buf = ((void *)__pyx_v_memviewslice.data); /* "View.MemoryView":1018 * result.view = memviewslice.memview.view * result.view.buf = <void *> memviewslice.data * result.view.ndim = ndim # <<<<<<<<<<<<<< * (<__pyx_buffer *> &result.view).obj = Py_None * Py_INCREF(Py_None) */ __pyx_v_result->__pyx_base.view.ndim = __pyx_v_ndim; /* "View.MemoryView":1019 * result.view.buf = <void *> memviewslice.data * result.view.ndim = ndim * (<__pyx_buffer *> &result.view).obj = Py_None # <<<<<<<<<<<<<< * Py_INCREF(Py_None) * */ ((Py_buffer *)(&__pyx_v_result->__pyx_base.view))->obj = Py_None; /* "View.MemoryView":1020 * result.view.ndim = ndim * (<__pyx_buffer *> &result.view).obj = Py_None * Py_INCREF(Py_None) # <<<<<<<<<<<<<< * * if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE: */ Py_INCREF(Py_None); /* "View.MemoryView":1022 * Py_INCREF(Py_None) * * if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE: # 
<<<<<<<<<<<<<< * result.flags = PyBUF_RECORDS * else: */ __pyx_t_1 = ((((struct __pyx_memoryview_obj *)__pyx_v_memviewslice.memview)->flags & PyBUF_WRITABLE) != 0); if (__pyx_t_1) { /* "View.MemoryView":1023 * * if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE: * result.flags = PyBUF_RECORDS # <<<<<<<<<<<<<< * else: * result.flags = PyBUF_RECORDS_RO */ __pyx_v_result->__pyx_base.flags = PyBUF_RECORDS; /* "View.MemoryView":1022 * Py_INCREF(Py_None) * * if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE: # <<<<<<<<<<<<<< * result.flags = PyBUF_RECORDS * else: */ goto __pyx_L4; } /* "View.MemoryView":1025 * result.flags = PyBUF_RECORDS * else: * result.flags = PyBUF_RECORDS_RO # <<<<<<<<<<<<<< * * result.view.shape = <Py_ssize_t *> result.from_slice.shape */ /*else*/ { __pyx_v_result->__pyx_base.flags = PyBUF_RECORDS_RO; } __pyx_L4:; /* "View.MemoryView":1027 * result.flags = PyBUF_RECORDS_RO * * result.view.shape = <Py_ssize_t *> result.from_slice.shape # <<<<<<<<<<<<<< * result.view.strides = <Py_ssize_t *> result.from_slice.strides * */ __pyx_v_result->__pyx_base.view.shape = ((Py_ssize_t *)__pyx_v_result->from_slice.shape); /* "View.MemoryView":1028 * * result.view.shape = <Py_ssize_t *> result.from_slice.shape * result.view.strides = <Py_ssize_t *> result.from_slice.strides # <<<<<<<<<<<<<< * * */ __pyx_v_result->__pyx_base.view.strides = ((Py_ssize_t *)__pyx_v_result->from_slice.strides); /* "View.MemoryView":1031 * * * result.view.suboffsets = NULL # <<<<<<<<<<<<<< * for suboffset in result.from_slice.suboffsets[:ndim]: * if suboffset >= 0: */ __pyx_v_result->__pyx_base.view.suboffsets = NULL; /* "View.MemoryView":1032 * * result.view.suboffsets = NULL * for suboffset in result.from_slice.suboffsets[:ndim]: # <<<<<<<<<<<<<< * if suboffset >= 0: * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets */ __pyx_t_7 = (__pyx_v_result->from_slice.suboffsets + __pyx_v_ndim); for (__pyx_t_8 = __pyx_v_result->from_slice.suboffsets; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) { __pyx_t_6 = __pyx_t_8; __pyx_v_suboffset = (__pyx_t_6[0]); /* "View.MemoryView":1033 * result.view.suboffsets = NULL * for suboffset in result.from_slice.suboffsets[:ndim]: * if suboffset >= 0: # <<<<<<<<<<<<<< * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets * break */ __pyx_t_1 = ((__pyx_v_suboffset >= 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":1034 * for suboffset in result.from_slice.suboffsets[:ndim]: * if suboffset >= 0: * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets # <<<<<<<<<<<<<< * break * */ __pyx_v_result->__pyx_base.view.suboffsets = ((Py_ssize_t *)__pyx_v_result->from_slice.suboffsets); /* "View.MemoryView":1035 * if suboffset >= 0: * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets * break # <<<<<<<<<<<<<< * * result.view.len = result.view.itemsize */ goto __pyx_L6_break; /* "View.MemoryView":1033 * result.view.suboffsets = NULL * for suboffset in result.from_slice.suboffsets[:ndim]: * if suboffset >= 0: # <<<<<<<<<<<<<< * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets * break */ } } __pyx_L6_break:; /* "View.MemoryView":1037 * break * * result.view.len = result.view.itemsize # <<<<<<<<<<<<<< * for length in result.view.shape[:ndim]: * result.view.len *= length */ __pyx_t_9 = __pyx_v_result->__pyx_base.view.itemsize; __pyx_v_result->__pyx_base.view.len = __pyx_t_9; /* "View.MemoryView":1038 * * result.view.len = result.view.itemsize * for length in result.view.shape[:ndim]: # 
<<<<<<<<<<<<<< * result.view.len *= length * */ __pyx_t_7 = (__pyx_v_result->__pyx_base.view.shape + __pyx_v_ndim); for (__pyx_t_8 = __pyx_v_result->__pyx_base.view.shape; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) { __pyx_t_6 = __pyx_t_8; __pyx_t_2 = PyInt_FromSsize_t((__pyx_t_6[0])); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1038, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":1039 * result.view.len = result.view.itemsize * for length in result.view.shape[:ndim]: * result.view.len *= length # <<<<<<<<<<<<<< * * result.to_object_func = to_object_func */ __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_result->__pyx_base.view.len); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1039, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyNumber_InPlaceMultiply(__pyx_t_2, __pyx_v_length); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1039, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_9 = __Pyx_PyIndex_AsSsize_t(__pyx_t_3); if (unlikely((__pyx_t_9 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 1039, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result->__pyx_base.view.len = __pyx_t_9; } /* "View.MemoryView":1041 * result.view.len *= length * * result.to_object_func = to_object_func # <<<<<<<<<<<<<< * result.to_dtype_func = to_dtype_func * */ __pyx_v_result->to_object_func = __pyx_v_to_object_func; /* "View.MemoryView":1042 * * result.to_object_func = to_object_func * result.to_dtype_func = to_dtype_func # <<<<<<<<<<<<<< * * return result */ __pyx_v_result->to_dtype_func = __pyx_v_to_dtype_func; /* "View.MemoryView":1044 * result.to_dtype_func = to_dtype_func * * return result # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_get_slice_from_memoryview') */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = ((PyObject *)__pyx_v_result); goto __pyx_L0; /* "View.MemoryView":994 * * @cname('__pyx_memoryview_fromslice') * cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<< * int ndim, * object (*to_object_func)(char *), */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview_fromslice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XDECREF(__pyx_v_length); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1047 * * @cname('__pyx_memoryview_get_slice_from_memoryview') * cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<< * __Pyx_memviewslice *mslice): * cdef _memoryviewslice obj */ static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_mslice) { struct __pyx_memoryviewslice_obj *__pyx_v_obj = 0; __Pyx_memviewslice *__pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; __Pyx_RefNannySetupContext("get_slice_from_memview", 0); /* "View.MemoryView":1050 * __Pyx_memviewslice *mslice): * cdef _memoryviewslice obj * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * obj = memview * return &obj.from_slice */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":1051 * cdef _memoryviewslice obj * if isinstance(memview, _memoryviewslice): * obj = memview # <<<<<<<<<<<<<< * return 
&obj.from_slice * else: */ if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) __PYX_ERR(1, 1051, __pyx_L1_error) __pyx_t_3 = ((PyObject *)__pyx_v_memview); __Pyx_INCREF(__pyx_t_3); __pyx_v_obj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":1052 * if isinstance(memview, _memoryviewslice): * obj = memview * return &obj.from_slice # <<<<<<<<<<<<<< * else: * slice_copy(memview, mslice) */ __pyx_r = (&__pyx_v_obj->from_slice); goto __pyx_L0; /* "View.MemoryView":1050 * __Pyx_memviewslice *mslice): * cdef _memoryviewslice obj * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * obj = memview * return &obj.from_slice */ } /* "View.MemoryView":1054 * return &obj.from_slice * else: * slice_copy(memview, mslice) # <<<<<<<<<<<<<< * return mslice * */ /*else*/ { __pyx_memoryview_slice_copy(__pyx_v_memview, __pyx_v_mslice); /* "View.MemoryView":1055 * else: * slice_copy(memview, mslice) * return mslice # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_slice_copy') */ __pyx_r = __pyx_v_mslice; goto __pyx_L0; } /* "View.MemoryView":1047 * * @cname('__pyx_memoryview_get_slice_from_memoryview') * cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<< * __Pyx_memviewslice *mslice): * cdef _memoryviewslice obj */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_WriteUnraisable("View.MemoryView.get_slice_from_memview", __pyx_clineno, __pyx_lineno, __pyx_filename, 1, 0); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_obj); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1058 * * @cname('__pyx_memoryview_slice_copy') * cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst): # <<<<<<<<<<<<<< * cdef int dim * cdef (Py_ssize_t*) shape, strides, suboffsets */ static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_dst) { int __pyx_v_dim; Py_ssize_t *__pyx_v_shape; Py_ssize_t *__pyx_v_strides; Py_ssize_t *__pyx_v_suboffsets; __Pyx_RefNannyDeclarations Py_ssize_t *__pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; Py_ssize_t __pyx_t_5; __Pyx_RefNannySetupContext("slice_copy", 0); /* "View.MemoryView":1062 * cdef (Py_ssize_t*) shape, strides, suboffsets * * shape = memview.view.shape # <<<<<<<<<<<<<< * strides = memview.view.strides * suboffsets = memview.view.suboffsets */ __pyx_t_1 = __pyx_v_memview->view.shape; __pyx_v_shape = __pyx_t_1; /* "View.MemoryView":1063 * * shape = memview.view.shape * strides = memview.view.strides # <<<<<<<<<<<<<< * suboffsets = memview.view.suboffsets * */ __pyx_t_1 = __pyx_v_memview->view.strides; __pyx_v_strides = __pyx_t_1; /* "View.MemoryView":1064 * shape = memview.view.shape * strides = memview.view.strides * suboffsets = memview.view.suboffsets # <<<<<<<<<<<<<< * * dst.memview = <__pyx_memoryview *> memview */ __pyx_t_1 = __pyx_v_memview->view.suboffsets; __pyx_v_suboffsets = __pyx_t_1; /* "View.MemoryView":1066 * suboffsets = memview.view.suboffsets * * dst.memview = <__pyx_memoryview *> memview # <<<<<<<<<<<<<< * dst.data = <char *> memview.view.buf * */ __pyx_v_dst->memview = ((struct __pyx_memoryview_obj *)__pyx_v_memview); /* "View.MemoryView":1067 * * dst.memview = <__pyx_memoryview *> memview * dst.data = <char *> memview.view.buf # <<<<<<<<<<<<<< * * for dim in range(memview.view.ndim): */ __pyx_v_dst->data = ((char *)__pyx_v_memview->view.buf); /* "View.MemoryView":1069 * 
dst.data = <char *> memview.view.buf * * for dim in range(memview.view.ndim): # <<<<<<<<<<<<<< * dst.shape[dim] = shape[dim] * dst.strides[dim] = strides[dim] */ __pyx_t_2 = __pyx_v_memview->view.ndim; __pyx_t_3 = __pyx_t_2; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_dim = __pyx_t_4; /* "View.MemoryView":1070 * * for dim in range(memview.view.ndim): * dst.shape[dim] = shape[dim] # <<<<<<<<<<<<<< * dst.strides[dim] = strides[dim] * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 */ (__pyx_v_dst->shape[__pyx_v_dim]) = (__pyx_v_shape[__pyx_v_dim]); /* "View.MemoryView":1071 * for dim in range(memview.view.ndim): * dst.shape[dim] = shape[dim] * dst.strides[dim] = strides[dim] # <<<<<<<<<<<<<< * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 * */ (__pyx_v_dst->strides[__pyx_v_dim]) = (__pyx_v_strides[__pyx_v_dim]); /* "View.MemoryView":1072 * dst.shape[dim] = shape[dim] * dst.strides[dim] = strides[dim] * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_copy_object') */ if ((__pyx_v_suboffsets != 0)) { __pyx_t_5 = (__pyx_v_suboffsets[__pyx_v_dim]); } else { __pyx_t_5 = -1L; } (__pyx_v_dst->suboffsets[__pyx_v_dim]) = __pyx_t_5; } /* "View.MemoryView":1058 * * @cname('__pyx_memoryview_slice_copy') * cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst): # <<<<<<<<<<<<<< * cdef int dim * cdef (Py_ssize_t*) shape, strides, suboffsets */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":1075 * * @cname('__pyx_memoryview_copy_object') * cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<< * "Create a new memoryview object" * cdef __Pyx_memviewslice memviewslice */ static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *__pyx_v_memview) { __Pyx_memviewslice __pyx_v_memviewslice; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("memoryview_copy", 0); /* "View.MemoryView":1078 * "Create a new memoryview object" * cdef __Pyx_memviewslice memviewslice * slice_copy(memview, &memviewslice) # <<<<<<<<<<<<<< * return memoryview_copy_from_slice(memview, &memviewslice) * */ __pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_memviewslice)); /* "View.MemoryView":1079 * cdef __Pyx_memviewslice memviewslice * slice_copy(memview, &memviewslice) * return memoryview_copy_from_slice(memview, &memviewslice) # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_copy_object_from_slice') */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __pyx_memoryview_copy_object_from_slice(__pyx_v_memview, (&__pyx_v_memviewslice)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1079, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":1075 * * @cname('__pyx_memoryview_copy_object') * cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<< * "Create a new memoryview object" * cdef __Pyx_memviewslice memviewslice */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview_copy", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1082 * * @cname('__pyx_memoryview_copy_object_from_slice') * cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<< * """ * Create a new memoryview object from a given memoryview object and slice. 
*/ static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_memviewslice) { PyObject *(*__pyx_v_to_object_func)(char *); int (*__pyx_v_to_dtype_func)(char *, PyObject *); PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *(*__pyx_t_3)(char *); int (*__pyx_t_4)(char *, PyObject *); PyObject *__pyx_t_5 = NULL; __Pyx_RefNannySetupContext("memoryview_copy_from_slice", 0); /* "View.MemoryView":1089 * cdef int (*to_dtype_func)(char *, object) except 0 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * to_object_func = (<_memoryviewslice> memview).to_object_func * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":1090 * * if isinstance(memview, _memoryviewslice): * to_object_func = (<_memoryviewslice> memview).to_object_func # <<<<<<<<<<<<<< * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func * else: */ __pyx_t_3 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_object_func; __pyx_v_to_object_func = __pyx_t_3; /* "View.MemoryView":1091 * if isinstance(memview, _memoryviewslice): * to_object_func = (<_memoryviewslice> memview).to_object_func * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func # <<<<<<<<<<<<<< * else: * to_object_func = NULL */ __pyx_t_4 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_dtype_func; __pyx_v_to_dtype_func = __pyx_t_4; /* "View.MemoryView":1089 * cdef int (*to_dtype_func)(char *, object) except 0 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * to_object_func = (<_memoryviewslice> memview).to_object_func * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func */ goto __pyx_L3; } /* "View.MemoryView":1093 * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func * else: * to_object_func = NULL # <<<<<<<<<<<<<< * to_dtype_func = NULL * */ /*else*/ { __pyx_v_to_object_func = NULL; /* "View.MemoryView":1094 * else: * to_object_func = NULL * to_dtype_func = NULL # <<<<<<<<<<<<<< * * return memoryview_fromslice(memviewslice[0], memview.view.ndim, */ __pyx_v_to_dtype_func = NULL; } __pyx_L3:; /* "View.MemoryView":1096 * to_dtype_func = NULL * * return memoryview_fromslice(memviewslice[0], memview.view.ndim, # <<<<<<<<<<<<<< * to_object_func, to_dtype_func, * memview.dtype_is_object) */ __Pyx_XDECREF(__pyx_r); /* "View.MemoryView":1098 * return memoryview_fromslice(memviewslice[0], memview.view.ndim, * to_object_func, to_dtype_func, * memview.dtype_is_object) # <<<<<<<<<<<<<< * * */ __pyx_t_5 = __pyx_memoryview_fromslice((__pyx_v_memviewslice[0]), __pyx_v_memview->view.ndim, __pyx_v_to_object_func, __pyx_v_to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1096, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "View.MemoryView":1082 * * @cname('__pyx_memoryview_copy_object_from_slice') * cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<< * """ * Create a new memoryview object from a given memoryview object and slice. 
*/ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.memoryview_copy_from_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1104 * * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: # <<<<<<<<<<<<<< * if arg < 0: * return -arg */ static Py_ssize_t abs_py_ssize_t(Py_ssize_t __pyx_v_arg) { Py_ssize_t __pyx_r; int __pyx_t_1; /* "View.MemoryView":1105 * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: * if arg < 0: # <<<<<<<<<<<<<< * return -arg * else: */ __pyx_t_1 = ((__pyx_v_arg < 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":1106 * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: * if arg < 0: * return -arg # <<<<<<<<<<<<<< * else: * return arg */ __pyx_r = (-__pyx_v_arg); goto __pyx_L0; /* "View.MemoryView":1105 * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: * if arg < 0: # <<<<<<<<<<<<<< * return -arg * else: */ } /* "View.MemoryView":1108 * return -arg * else: * return arg # <<<<<<<<<<<<<< * * @cname('__pyx_get_best_slice_order') */ /*else*/ { __pyx_r = __pyx_v_arg; goto __pyx_L0; } /* "View.MemoryView":1104 * * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: # <<<<<<<<<<<<<< * if arg < 0: * return -arg */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1111 * * @cname('__pyx_get_best_slice_order') * cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) nogil: # <<<<<<<<<<<<<< * """ * Figure out the best memory access order for a given slice. */ static char __pyx_get_best_slice_order(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim) { int __pyx_v_i; Py_ssize_t __pyx_v_c_stride; Py_ssize_t __pyx_v_f_stride; char __pyx_r; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; /* "View.MemoryView":1116 * """ * cdef int i * cdef Py_ssize_t c_stride = 0 # <<<<<<<<<<<<<< * cdef Py_ssize_t f_stride = 0 * */ __pyx_v_c_stride = 0; /* "View.MemoryView":1117 * cdef int i * cdef Py_ssize_t c_stride = 0 * cdef Py_ssize_t f_stride = 0 # <<<<<<<<<<<<<< * * for i in range(ndim - 1, -1, -1): */ __pyx_v_f_stride = 0; /* "View.MemoryView":1119 * cdef Py_ssize_t f_stride = 0 * * for i in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< * if mslice.shape[i] > 1: * c_stride = mslice.strides[i] */ for (__pyx_t_1 = (__pyx_v_ndim - 1); __pyx_t_1 > -1; __pyx_t_1-=1) { __pyx_v_i = __pyx_t_1; /* "View.MemoryView":1120 * * for i in range(ndim - 1, -1, -1): * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< * c_stride = mslice.strides[i] * break */ __pyx_t_2 = (((__pyx_v_mslice->shape[__pyx_v_i]) > 1) != 0); if (__pyx_t_2) { /* "View.MemoryView":1121 * for i in range(ndim - 1, -1, -1): * if mslice.shape[i] > 1: * c_stride = mslice.strides[i] # <<<<<<<<<<<<<< * break * */ __pyx_v_c_stride = (__pyx_v_mslice->strides[__pyx_v_i]); /* "View.MemoryView":1122 * if mslice.shape[i] > 1: * c_stride = mslice.strides[i] * break # <<<<<<<<<<<<<< * * for i in range(ndim): */ goto __pyx_L4_break; /* "View.MemoryView":1120 * * for i in range(ndim - 1, -1, -1): * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< * c_stride = mslice.strides[i] * break */ } } __pyx_L4_break:; /* "View.MemoryView":1124 * break * * for i in range(ndim): # <<<<<<<<<<<<<< * if mslice.shape[i] > 1: * f_stride = mslice.strides[i] */ __pyx_t_1 = __pyx_v_ndim; __pyx_t_3 = __pyx_t_1; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_i = __pyx_t_4; /* "View.MemoryView":1125 * * for i in range(ndim): * if 
mslice.shape[i] > 1: # <<<<<<<<<<<<<< * f_stride = mslice.strides[i] * break */ __pyx_t_2 = (((__pyx_v_mslice->shape[__pyx_v_i]) > 1) != 0); if (__pyx_t_2) { /* "View.MemoryView":1126 * for i in range(ndim): * if mslice.shape[i] > 1: * f_stride = mslice.strides[i] # <<<<<<<<<<<<<< * break * */ __pyx_v_f_stride = (__pyx_v_mslice->strides[__pyx_v_i]); /* "View.MemoryView":1127 * if mslice.shape[i] > 1: * f_stride = mslice.strides[i] * break # <<<<<<<<<<<<<< * * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): */ goto __pyx_L7_break; /* "View.MemoryView":1125 * * for i in range(ndim): * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< * f_stride = mslice.strides[i] * break */ } } __pyx_L7_break:; /* "View.MemoryView":1129 * break * * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): # <<<<<<<<<<<<<< * return 'C' * else: */ __pyx_t_2 = ((abs_py_ssize_t(__pyx_v_c_stride) <= abs_py_ssize_t(__pyx_v_f_stride)) != 0); if (__pyx_t_2) { /* "View.MemoryView":1130 * * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): * return 'C' # <<<<<<<<<<<<<< * else: * return 'F' */ __pyx_r = 'C'; goto __pyx_L0; /* "View.MemoryView":1129 * break * * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): # <<<<<<<<<<<<<< * return 'C' * else: */ } /* "View.MemoryView":1132 * return 'C' * else: * return 'F' # <<<<<<<<<<<<<< * * @cython.cdivision(True) */ /*else*/ { __pyx_r = 'F'; goto __pyx_L0; } /* "View.MemoryView":1111 * * @cname('__pyx_get_best_slice_order') * cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) nogil: # <<<<<<<<<<<<<< * """ * Figure out the best memory access order for a given slice. */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1135 * * @cython.cdivision(True) * cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides, # <<<<<<<<<<<<<< * char *dst_data, Py_ssize_t *dst_strides, * Py_ssize_t *src_shape, Py_ssize_t *dst_shape, */ static void _copy_strided_to_strided(char *__pyx_v_src_data, Py_ssize_t *__pyx_v_src_strides, char *__pyx_v_dst_data, Py_ssize_t *__pyx_v_dst_strides, Py_ssize_t *__pyx_v_src_shape, Py_ssize_t *__pyx_v_dst_shape, int __pyx_v_ndim, size_t __pyx_v_itemsize) { CYTHON_UNUSED Py_ssize_t __pyx_v_i; CYTHON_UNUSED Py_ssize_t __pyx_v_src_extent; Py_ssize_t __pyx_v_dst_extent; Py_ssize_t __pyx_v_src_stride; Py_ssize_t __pyx_v_dst_stride; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; Py_ssize_t __pyx_t_4; Py_ssize_t __pyx_t_5; Py_ssize_t __pyx_t_6; /* "View.MemoryView":1142 * * cdef Py_ssize_t i * cdef Py_ssize_t src_extent = src_shape[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t dst_extent = dst_shape[0] * cdef Py_ssize_t src_stride = src_strides[0] */ __pyx_v_src_extent = (__pyx_v_src_shape[0]); /* "View.MemoryView":1143 * cdef Py_ssize_t i * cdef Py_ssize_t src_extent = src_shape[0] * cdef Py_ssize_t dst_extent = dst_shape[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t src_stride = src_strides[0] * cdef Py_ssize_t dst_stride = dst_strides[0] */ __pyx_v_dst_extent = (__pyx_v_dst_shape[0]); /* "View.MemoryView":1144 * cdef Py_ssize_t src_extent = src_shape[0] * cdef Py_ssize_t dst_extent = dst_shape[0] * cdef Py_ssize_t src_stride = src_strides[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t dst_stride = dst_strides[0] * */ __pyx_v_src_stride = (__pyx_v_src_strides[0]); /* "View.MemoryView":1145 * cdef Py_ssize_t dst_extent = dst_shape[0] * cdef Py_ssize_t src_stride = src_strides[0] * cdef Py_ssize_t dst_stride = dst_strides[0] # <<<<<<<<<<<<<< * * if ndim == 1: */ __pyx_v_dst_stride = (__pyx_v_dst_strides[0]); /* 
"View.MemoryView":1147 * cdef Py_ssize_t dst_stride = dst_strides[0] * * if ndim == 1: # <<<<<<<<<<<<<< * if (src_stride > 0 and dst_stride > 0 and * <size_t> src_stride == itemsize == <size_t> dst_stride): */ __pyx_t_1 = ((__pyx_v_ndim == 1) != 0); if (__pyx_t_1) { /* "View.MemoryView":1148 * * if ndim == 1: * if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<< * <size_t> src_stride == itemsize == <size_t> dst_stride): * memcpy(dst_data, src_data, itemsize * dst_extent) */ __pyx_t_2 = ((__pyx_v_src_stride > 0) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L5_bool_binop_done; } __pyx_t_2 = ((__pyx_v_dst_stride > 0) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L5_bool_binop_done; } /* "View.MemoryView":1149 * if ndim == 1: * if (src_stride > 0 and dst_stride > 0 and * <size_t> src_stride == itemsize == <size_t> dst_stride): # <<<<<<<<<<<<<< * memcpy(dst_data, src_data, itemsize * dst_extent) * else: */ __pyx_t_2 = (((size_t)__pyx_v_src_stride) == __pyx_v_itemsize); if (__pyx_t_2) { __pyx_t_2 = (__pyx_v_itemsize == ((size_t)__pyx_v_dst_stride)); } __pyx_t_3 = (__pyx_t_2 != 0); __pyx_t_1 = __pyx_t_3; __pyx_L5_bool_binop_done:; /* "View.MemoryView":1148 * * if ndim == 1: * if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<< * <size_t> src_stride == itemsize == <size_t> dst_stride): * memcpy(dst_data, src_data, itemsize * dst_extent) */ if (__pyx_t_1) { /* "View.MemoryView":1150 * if (src_stride > 0 and dst_stride > 0 and * <size_t> src_stride == itemsize == <size_t> dst_stride): * memcpy(dst_data, src_data, itemsize * dst_extent) # <<<<<<<<<<<<<< * else: * for i in range(dst_extent): */ (void)(memcpy(__pyx_v_dst_data, __pyx_v_src_data, (__pyx_v_itemsize * __pyx_v_dst_extent))); /* "View.MemoryView":1148 * * if ndim == 1: * if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<< * <size_t> src_stride == itemsize == <size_t> dst_stride): * memcpy(dst_data, src_data, itemsize * dst_extent) */ goto __pyx_L4; } /* "View.MemoryView":1152 * memcpy(dst_data, src_data, itemsize * dst_extent) * else: * for i in range(dst_extent): # <<<<<<<<<<<<<< * memcpy(dst_data, src_data, itemsize) * src_data += src_stride */ /*else*/ { __pyx_t_4 = __pyx_v_dst_extent; __pyx_t_5 = __pyx_t_4; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_i = __pyx_t_6; /* "View.MemoryView":1153 * else: * for i in range(dst_extent): * memcpy(dst_data, src_data, itemsize) # <<<<<<<<<<<<<< * src_data += src_stride * dst_data += dst_stride */ (void)(memcpy(__pyx_v_dst_data, __pyx_v_src_data, __pyx_v_itemsize)); /* "View.MemoryView":1154 * for i in range(dst_extent): * memcpy(dst_data, src_data, itemsize) * src_data += src_stride # <<<<<<<<<<<<<< * dst_data += dst_stride * else: */ __pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride); /* "View.MemoryView":1155 * memcpy(dst_data, src_data, itemsize) * src_data += src_stride * dst_data += dst_stride # <<<<<<<<<<<<<< * else: * for i in range(dst_extent): */ __pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride); } } __pyx_L4:; /* "View.MemoryView":1147 * cdef Py_ssize_t dst_stride = dst_strides[0] * * if ndim == 1: # <<<<<<<<<<<<<< * if (src_stride > 0 and dst_stride > 0 and * <size_t> src_stride == itemsize == <size_t> dst_stride): */ goto __pyx_L3; } /* "View.MemoryView":1157 * dst_data += dst_stride * else: * for i in range(dst_extent): # <<<<<<<<<<<<<< * _copy_strided_to_strided(src_data, src_strides + 1, * dst_data, dst_strides + 1, */ /*else*/ { __pyx_t_4 = __pyx_v_dst_extent; __pyx_t_5 = 
__pyx_t_4; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_i = __pyx_t_6; /* "View.MemoryView":1158 * else: * for i in range(dst_extent): * _copy_strided_to_strided(src_data, src_strides + 1, # <<<<<<<<<<<<<< * dst_data, dst_strides + 1, * src_shape + 1, dst_shape + 1, */ _copy_strided_to_strided(__pyx_v_src_data, (__pyx_v_src_strides + 1), __pyx_v_dst_data, (__pyx_v_dst_strides + 1), (__pyx_v_src_shape + 1), (__pyx_v_dst_shape + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize); /* "View.MemoryView":1162 * src_shape + 1, dst_shape + 1, * ndim - 1, itemsize) * src_data += src_stride # <<<<<<<<<<<<<< * dst_data += dst_stride * */ __pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride); /* "View.MemoryView":1163 * ndim - 1, itemsize) * src_data += src_stride * dst_data += dst_stride # <<<<<<<<<<<<<< * * cdef void copy_strided_to_strided(__Pyx_memviewslice *src, */ __pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride); } } __pyx_L3:; /* "View.MemoryView":1135 * * @cython.cdivision(True) * cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides, # <<<<<<<<<<<<<< * char *dst_data, Py_ssize_t *dst_strides, * Py_ssize_t *src_shape, Py_ssize_t *dst_shape, */ /* function exit code */ } /* "View.MemoryView":1165 * dst_data += dst_stride * * cdef void copy_strided_to_strided(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< * __Pyx_memviewslice *dst, * int ndim, size_t itemsize) nogil: */ static void copy_strided_to_strided(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize) { /* "View.MemoryView":1168 * __Pyx_memviewslice *dst, * int ndim, size_t itemsize) nogil: * _copy_strided_to_strided(src.data, src.strides, dst.data, dst.strides, # <<<<<<<<<<<<<< * src.shape, dst.shape, ndim, itemsize) * */ _copy_strided_to_strided(__pyx_v_src->data, __pyx_v_src->strides, __pyx_v_dst->data, __pyx_v_dst->strides, __pyx_v_src->shape, __pyx_v_dst->shape, __pyx_v_ndim, __pyx_v_itemsize); /* "View.MemoryView":1165 * dst_data += dst_stride * * cdef void copy_strided_to_strided(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< * __Pyx_memviewslice *dst, * int ndim, size_t itemsize) nogil: */ /* function exit code */ } /* "View.MemoryView":1172 * * @cname('__pyx_memoryview_slice_get_size') * cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: # <<<<<<<<<<<<<< * "Return the size of the memory occupied by the slice in number of bytes" * cdef int i */ static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *__pyx_v_src, int __pyx_v_ndim) { int __pyx_v_i; Py_ssize_t __pyx_v_size; Py_ssize_t __pyx_r; Py_ssize_t __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; /* "View.MemoryView":1175 * "Return the size of the memory occupied by the slice in number of bytes" * cdef int i * cdef Py_ssize_t size = src.memview.view.itemsize # <<<<<<<<<<<<<< * * for i in range(ndim): */ __pyx_t_1 = __pyx_v_src->memview->view.itemsize; __pyx_v_size = __pyx_t_1; /* "View.MemoryView":1177 * cdef Py_ssize_t size = src.memview.view.itemsize * * for i in range(ndim): # <<<<<<<<<<<<<< * size *= src.shape[i] * */ __pyx_t_2 = __pyx_v_ndim; __pyx_t_3 = __pyx_t_2; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_i = __pyx_t_4; /* "View.MemoryView":1178 * * for i in range(ndim): * size *= src.shape[i] # <<<<<<<<<<<<<< * * return size */ __pyx_v_size = (__pyx_v_size * (__pyx_v_src->shape[__pyx_v_i])); } /* "View.MemoryView":1180 * size *= src.shape[i] * * return size # <<<<<<<<<<<<<< * * 
@cname('__pyx_fill_contig_strides_array') */ __pyx_r = __pyx_v_size; goto __pyx_L0; /* "View.MemoryView":1172 * * @cname('__pyx_memoryview_slice_get_size') * cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: # <<<<<<<<<<<<<< * "Return the size of the memory occupied by the slice in number of bytes" * cdef int i */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1183 * * @cname('__pyx_fill_contig_strides_array') * cdef Py_ssize_t fill_contig_strides_array( # <<<<<<<<<<<<<< * Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride, * int ndim, char order) nogil: */ static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, Py_ssize_t __pyx_v_stride, int __pyx_v_ndim, char __pyx_v_order) { int __pyx_v_idx; Py_ssize_t __pyx_r; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; /* "View.MemoryView":1192 * cdef int idx * * if order == 'F': # <<<<<<<<<<<<<< * for idx in range(ndim): * strides[idx] = stride */ __pyx_t_1 = ((__pyx_v_order == 'F') != 0); if (__pyx_t_1) { /* "View.MemoryView":1193 * * if order == 'F': * for idx in range(ndim): # <<<<<<<<<<<<<< * strides[idx] = stride * stride = stride * shape[idx] */ __pyx_t_2 = __pyx_v_ndim; __pyx_t_3 = __pyx_t_2; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_idx = __pyx_t_4; /* "View.MemoryView":1194 * if order == 'F': * for idx in range(ndim): * strides[idx] = stride # <<<<<<<<<<<<<< * stride = stride * shape[idx] * else: */ (__pyx_v_strides[__pyx_v_idx]) = __pyx_v_stride; /* "View.MemoryView":1195 * for idx in range(ndim): * strides[idx] = stride * stride = stride * shape[idx] # <<<<<<<<<<<<<< * else: * for idx in range(ndim - 1, -1, -1): */ __pyx_v_stride = (__pyx_v_stride * (__pyx_v_shape[__pyx_v_idx])); } /* "View.MemoryView":1192 * cdef int idx * * if order == 'F': # <<<<<<<<<<<<<< * for idx in range(ndim): * strides[idx] = stride */ goto __pyx_L3; } /* "View.MemoryView":1197 * stride = stride * shape[idx] * else: * for idx in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< * strides[idx] = stride * stride = stride * shape[idx] */ /*else*/ { for (__pyx_t_2 = (__pyx_v_ndim - 1); __pyx_t_2 > -1; __pyx_t_2-=1) { __pyx_v_idx = __pyx_t_2; /* "View.MemoryView":1198 * else: * for idx in range(ndim - 1, -1, -1): * strides[idx] = stride # <<<<<<<<<<<<<< * stride = stride * shape[idx] * */ (__pyx_v_strides[__pyx_v_idx]) = __pyx_v_stride; /* "View.MemoryView":1199 * for idx in range(ndim - 1, -1, -1): * strides[idx] = stride * stride = stride * shape[idx] # <<<<<<<<<<<<<< * * return stride */ __pyx_v_stride = (__pyx_v_stride * (__pyx_v_shape[__pyx_v_idx])); } } __pyx_L3:; /* "View.MemoryView":1201 * stride = stride * shape[idx] * * return stride # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_copy_data_to_temp') */ __pyx_r = __pyx_v_stride; goto __pyx_L0; /* "View.MemoryView":1183 * * @cname('__pyx_fill_contig_strides_array') * cdef Py_ssize_t fill_contig_strides_array( # <<<<<<<<<<<<<< * Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride, * int ndim, char order) nogil: */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1204 * * @cname('__pyx_memoryview_copy_data_to_temp') * cdef void *copy_data_to_temp(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< * __Pyx_memviewslice *tmpslice, * char order, */ static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_tmpslice, char __pyx_v_order, int __pyx_v_ndim) { int __pyx_v_i; void *__pyx_v_result; size_t 
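/* copy_data_to_temp allocates a malloc'd scratch buffer sized by
   slice_get_size, points tmpslice at it with freshly computed contiguous
   strides (fill_contig_strides_array above), and copies src into it.
   Callers use this to snapshot a source slice that overlaps the
   destination. */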
__pyx_v_itemsize; size_t __pyx_v_size; void *__pyx_r; Py_ssize_t __pyx_t_1; int __pyx_t_2; int __pyx_t_3; struct __pyx_memoryview_obj *__pyx_t_4; int __pyx_t_5; int __pyx_t_6; /* "View.MemoryView":1215 * cdef void *result * * cdef size_t itemsize = src.memview.view.itemsize # <<<<<<<<<<<<<< * cdef size_t size = slice_get_size(src, ndim) * */ __pyx_t_1 = __pyx_v_src->memview->view.itemsize; __pyx_v_itemsize = __pyx_t_1; /* "View.MemoryView":1216 * * cdef size_t itemsize = src.memview.view.itemsize * cdef size_t size = slice_get_size(src, ndim) # <<<<<<<<<<<<<< * * result = malloc(size) */ __pyx_v_size = __pyx_memoryview_slice_get_size(__pyx_v_src, __pyx_v_ndim); /* "View.MemoryView":1218 * cdef size_t size = slice_get_size(src, ndim) * * result = malloc(size) # <<<<<<<<<<<<<< * if not result: * _err(MemoryError, NULL) */ __pyx_v_result = malloc(__pyx_v_size); /* "View.MemoryView":1219 * * result = malloc(size) * if not result: # <<<<<<<<<<<<<< * _err(MemoryError, NULL) * */ __pyx_t_2 = ((!(__pyx_v_result != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":1220 * result = malloc(size) * if not result: * _err(MemoryError, NULL) # <<<<<<<<<<<<<< * * */ __pyx_t_3 = __pyx_memoryview_err(__pyx_builtin_MemoryError, NULL); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 1220, __pyx_L1_error) /* "View.MemoryView":1219 * * result = malloc(size) * if not result: # <<<<<<<<<<<<<< * _err(MemoryError, NULL) * */ } /* "View.MemoryView":1223 * * * tmpslice.data = <char *> result # <<<<<<<<<<<<<< * tmpslice.memview = src.memview * for i in range(ndim): */ __pyx_v_tmpslice->data = ((char *)__pyx_v_result); /* "View.MemoryView":1224 * * tmpslice.data = <char *> result * tmpslice.memview = src.memview # <<<<<<<<<<<<<< * for i in range(ndim): * tmpslice.shape[i] = src.shape[i] */ __pyx_t_4 = __pyx_v_src->memview; __pyx_v_tmpslice->memview = __pyx_t_4; /* "View.MemoryView":1225 * tmpslice.data = <char *> result * tmpslice.memview = src.memview * for i in range(ndim): # <<<<<<<<<<<<<< * tmpslice.shape[i] = src.shape[i] * tmpslice.suboffsets[i] = -1 */ __pyx_t_3 = __pyx_v_ndim; __pyx_t_5 = __pyx_t_3; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_i = __pyx_t_6; /* "View.MemoryView":1226 * tmpslice.memview = src.memview * for i in range(ndim): * tmpslice.shape[i] = src.shape[i] # <<<<<<<<<<<<<< * tmpslice.suboffsets[i] = -1 * */ (__pyx_v_tmpslice->shape[__pyx_v_i]) = (__pyx_v_src->shape[__pyx_v_i]); /* "View.MemoryView":1227 * for i in range(ndim): * tmpslice.shape[i] = src.shape[i] * tmpslice.suboffsets[i] = -1 # <<<<<<<<<<<<<< * * fill_contig_strides_array(&tmpslice.shape[0], &tmpslice.strides[0], itemsize, */ (__pyx_v_tmpslice->suboffsets[__pyx_v_i]) = -1L; } /* "View.MemoryView":1229 * tmpslice.suboffsets[i] = -1 * * fill_contig_strides_array(&tmpslice.shape[0], &tmpslice.strides[0], itemsize, # <<<<<<<<<<<<<< * ndim, order) * */ (void)(__pyx_fill_contig_strides_array((&(__pyx_v_tmpslice->shape[0])), (&(__pyx_v_tmpslice->strides[0])), __pyx_v_itemsize, __pyx_v_ndim, __pyx_v_order)); /* "View.MemoryView":1233 * * * for i in range(ndim): # <<<<<<<<<<<<<< * if tmpslice.shape[i] == 1: * tmpslice.strides[i] = 0 */ __pyx_t_3 = __pyx_v_ndim; __pyx_t_5 = __pyx_t_3; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_i = __pyx_t_6; /* "View.MemoryView":1234 * * for i in range(ndim): * if tmpslice.shape[i] == 1: # <<<<<<<<<<<<<< * tmpslice.strides[i] = 0 * */ __pyx_t_2 = (((__pyx_v_tmpslice->shape[__pyx_v_i]) == 1) != 0); if (__pyx_t_2) { /* "View.MemoryView":1235 * for i in 
range(ndim): * if tmpslice.shape[i] == 1: * tmpslice.strides[i] = 0 # <<<<<<<<<<<<<< * * if slice_is_contig(src[0], order, ndim): */ (__pyx_v_tmpslice->strides[__pyx_v_i]) = 0; /* "View.MemoryView":1234 * * for i in range(ndim): * if tmpslice.shape[i] == 1: # <<<<<<<<<<<<<< * tmpslice.strides[i] = 0 * */ } } /* "View.MemoryView":1237 * tmpslice.strides[i] = 0 * * if slice_is_contig(src[0], order, ndim): # <<<<<<<<<<<<<< * memcpy(result, src.data, size) * else: */ __pyx_t_2 = (__pyx_memviewslice_is_contig((__pyx_v_src[0]), __pyx_v_order, __pyx_v_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1238 * * if slice_is_contig(src[0], order, ndim): * memcpy(result, src.data, size) # <<<<<<<<<<<<<< * else: * copy_strided_to_strided(src, tmpslice, ndim, itemsize) */ (void)(memcpy(__pyx_v_result, __pyx_v_src->data, __pyx_v_size)); /* "View.MemoryView":1237 * tmpslice.strides[i] = 0 * * if slice_is_contig(src[0], order, ndim): # <<<<<<<<<<<<<< * memcpy(result, src.data, size) * else: */ goto __pyx_L9; } /* "View.MemoryView":1240 * memcpy(result, src.data, size) * else: * copy_strided_to_strided(src, tmpslice, ndim, itemsize) # <<<<<<<<<<<<<< * * return result */ /*else*/ { copy_strided_to_strided(__pyx_v_src, __pyx_v_tmpslice, __pyx_v_ndim, __pyx_v_itemsize); } __pyx_L9:; /* "View.MemoryView":1242 * copy_strided_to_strided(src, tmpslice, ndim, itemsize) * * return result # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_result; goto __pyx_L0; /* "View.MemoryView":1204 * * @cname('__pyx_memoryview_copy_data_to_temp') * cdef void *copy_data_to_temp(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< * __Pyx_memviewslice *tmpslice, * char order, */ /* function exit code */ __pyx_L1_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_AddTraceback("View.MemoryView.copy_data_to_temp", __pyx_clineno, __pyx_lineno, __pyx_filename); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_r = NULL; __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1247 * * @cname('__pyx_memoryview_err_extents') * cdef int _err_extents(int i, Py_ssize_t extent1, # <<<<<<<<<<<<<< * Py_ssize_t extent2) except -1 with gil: * raise ValueError("got differing extents in dimension %d (got %d and %d)" % */ static int __pyx_memoryview_err_extents(int __pyx_v_i, Py_ssize_t __pyx_v_extent1, Py_ssize_t __pyx_v_extent2) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_RefNannySetupContext("_err_extents", 0); /* "View.MemoryView":1250 * Py_ssize_t extent2) except -1 with gil: * raise ValueError("got differing extents in dimension %d (got %d and %d)" % * (i, extent1, extent2)) # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_err_dim') */ __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_i); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1250, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_extent1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1250, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_extent2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1250, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyTuple_New(3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1250, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_2); 
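/* _err_extents is declared 'with gil' (hence the PyGILState_Ensure
   above): it can be reached from nogil copying code, so the GIL is
   re-acquired before the ValueError describing the mismatched extents is
   built and raised. */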
__Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_4, 2, __pyx_t_3); __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_t_3 = 0; /* "View.MemoryView":1249 * cdef int _err_extents(int i, Py_ssize_t extent1, * Py_ssize_t extent2) except -1 with gil: * raise ValueError("got differing extents in dimension %d (got %d and %d)" % # <<<<<<<<<<<<<< * (i, extent1, extent2)) * */ __pyx_t_3 = __Pyx_PyString_Format(__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __PYX_ERR(1, 1249, __pyx_L1_error) /* "View.MemoryView":1247 * * @cname('__pyx_memoryview_err_extents') * cdef int _err_extents(int i, Py_ssize_t extent1, # <<<<<<<<<<<<<< * Py_ssize_t extent2) except -1 with gil: * raise ValueError("got differing extents in dimension %d (got %d and %d)" % */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("View.MemoryView._err_extents", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __Pyx_RefNannyFinishContext(); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif return __pyx_r; } /* "View.MemoryView":1253 * * @cname('__pyx_memoryview_err_dim') * cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: # <<<<<<<<<<<<<< * raise error(msg.decode('ascii') % dim) * */ static int __pyx_memoryview_err_dim(PyObject *__pyx_v_error, char *__pyx_v_msg, int __pyx_v_dim) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_RefNannySetupContext("_err_dim", 0); __Pyx_INCREF(__pyx_v_error); /* "View.MemoryView":1254 * @cname('__pyx_memoryview_err_dim') * cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: * raise error(msg.decode('ascii') % dim) # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_err') */ __pyx_t_2 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1254, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1254, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyUnicode_Format(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1254, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_INCREF(__pyx_v_error); __pyx_t_3 = __pyx_v_error; __pyx_t_2 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) { __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3); if (likely(__pyx_t_2)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_2); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_3, function); } } if (!__pyx_t_2) { __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1254, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_GOTREF(__pyx_t_1); } else { #if CYTHON_FAST_PYCALL if 
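/* Call dispatch for error(msg.decode('ascii') % dim): under
   CYTHON_FAST_PYCALL / CYTHON_FAST_PYCCALL, plain Python functions and
   builtin C functions take the FastCall paths below; otherwise the
   argument is packed into a tuple for the generic __Pyx_PyObject_Call
   fallback. */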
(PyFunction_Check(__pyx_t_3)) { PyObject *__pyx_temp[2] = {__pyx_t_2, __pyx_t_4}; __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_3, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1254, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_3)) { PyObject *__pyx_temp[2] = {__pyx_t_2, __pyx_t_4}; __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_3, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1254, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } else #endif { __pyx_t_5 = PyTuple_New(1+1); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1254, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_2); __pyx_t_2 = NULL; __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 0+1, __pyx_t_4); __pyx_t_4 = 0; __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_5, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1254, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; } } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 1254, __pyx_L1_error) /* "View.MemoryView":1253 * * @cname('__pyx_memoryview_err_dim') * cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: # <<<<<<<<<<<<<< * raise error(msg.decode('ascii') % dim) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView._err_dim", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __Pyx_XDECREF(__pyx_v_error); __Pyx_RefNannyFinishContext(); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif return __pyx_r; } /* "View.MemoryView":1257 * * @cname('__pyx_memoryview_err') * cdef int _err(object error, char *msg) except -1 with gil: # <<<<<<<<<<<<<< * if msg != NULL: * raise error(msg.decode('ascii')) */ static int __pyx_memoryview_err(PyObject *__pyx_v_error, char *__pyx_v_msg) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_RefNannySetupContext("_err", 0); __Pyx_INCREF(__pyx_v_error); /* "View.MemoryView":1258 * @cname('__pyx_memoryview_err') * cdef int _err(object error, char *msg) except -1 with gil: * if msg != NULL: # <<<<<<<<<<<<<< * raise error(msg.decode('ascii')) * else: */ __pyx_t_1 = ((__pyx_v_msg != NULL) != 0); if (unlikely(__pyx_t_1)) { /* "View.MemoryView":1259 * cdef int _err(object error, char *msg) except -1 with gil: * if msg != NULL: * raise error(msg.decode('ascii')) # <<<<<<<<<<<<<< * else: * raise error */ __pyx_t_3 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1259, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(__pyx_v_error); __pyx_t_4 = __pyx_v_error; __pyx_t_5 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_4))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_4); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); __Pyx_INCREF(__pyx_t_5); 
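/* _err is the message-optional sibling of _err_dim: a non-NULL msg is
   ASCII-decoded and passed to the error class, while a NULL msg re-raises
   the error object itself (the bare 'raise error' branch further below). */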
__Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_4, function); } } if (!__pyx_t_5) { __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1259, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_GOTREF(__pyx_t_2); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_4)) { PyObject *__pyx_temp[2] = {__pyx_t_5, __pyx_t_3}; __pyx_t_2 = __Pyx_PyFunction_FastCall(__pyx_t_4, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1259, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_4)) { PyObject *__pyx_temp[2] = {__pyx_t_5, __pyx_t_3}; __pyx_t_2 = __Pyx_PyCFunction_FastCall(__pyx_t_4, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1259, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } else #endif { __pyx_t_6 = PyTuple_New(1+1); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 1259, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_5); __pyx_t_5 = NULL; __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_6, 0+1, __pyx_t_3); __pyx_t_3 = 0; __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_6, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1259, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } } __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __PYX_ERR(1, 1259, __pyx_L1_error) /* "View.MemoryView":1258 * @cname('__pyx_memoryview_err') * cdef int _err(object error, char *msg) except -1 with gil: * if msg != NULL: # <<<<<<<<<<<<<< * raise error(msg.decode('ascii')) * else: */ } /* "View.MemoryView":1261 * raise error(msg.decode('ascii')) * else: * raise error # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_copy_contents') */ /*else*/ { __Pyx_Raise(__pyx_v_error, 0, 0, 0); __PYX_ERR(1, 1261, __pyx_L1_error) } /* "View.MemoryView":1257 * * @cname('__pyx_memoryview_err') * cdef int _err(object error, char *msg) except -1 with gil: # <<<<<<<<<<<<<< * if msg != NULL: * raise error(msg.decode('ascii')) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("View.MemoryView._err", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __Pyx_XDECREF(__pyx_v_error); __Pyx_RefNannyFinishContext(); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif return __pyx_r; } /* "View.MemoryView":1264 * * @cname('__pyx_memoryview_copy_contents') * cdef int memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<< * __Pyx_memviewslice dst, * int src_ndim, int dst_ndim, */ static int __pyx_memoryview_copy_contents(__Pyx_memviewslice __pyx_v_src, __Pyx_memviewslice __pyx_v_dst, int __pyx_v_src_ndim, int __pyx_v_dst_ndim, int __pyx_v_dtype_is_object) { void *__pyx_v_tmpdata; size_t __pyx_v_itemsize; int __pyx_v_i; char __pyx_v_order; int __pyx_v_broadcasting; int __pyx_v_direct_copy; __Pyx_memviewslice __pyx_v_tmp; int __pyx_v_ndim; int __pyx_r; Py_ssize_t __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; int __pyx_t_5; int __pyx_t_6; void *__pyx_t_7; int __pyx_t_8; /* "View.MemoryView":1272 * Check for overlapping memory and verify the shapes. 
* """ * cdef void *tmpdata = NULL # <<<<<<<<<<<<<< * cdef size_t itemsize = src.memview.view.itemsize * cdef int i */ __pyx_v_tmpdata = NULL; /* "View.MemoryView":1273 * """ * cdef void *tmpdata = NULL * cdef size_t itemsize = src.memview.view.itemsize # <<<<<<<<<<<<<< * cdef int i * cdef char order = get_best_order(&src, src_ndim) */ __pyx_t_1 = __pyx_v_src.memview->view.itemsize; __pyx_v_itemsize = __pyx_t_1; /* "View.MemoryView":1275 * cdef size_t itemsize = src.memview.view.itemsize * cdef int i * cdef char order = get_best_order(&src, src_ndim) # <<<<<<<<<<<<<< * cdef bint broadcasting = False * cdef bint direct_copy = False */ __pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_src), __pyx_v_src_ndim); /* "View.MemoryView":1276 * cdef int i * cdef char order = get_best_order(&src, src_ndim) * cdef bint broadcasting = False # <<<<<<<<<<<<<< * cdef bint direct_copy = False * cdef __Pyx_memviewslice tmp */ __pyx_v_broadcasting = 0; /* "View.MemoryView":1277 * cdef char order = get_best_order(&src, src_ndim) * cdef bint broadcasting = False * cdef bint direct_copy = False # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice tmp * */ __pyx_v_direct_copy = 0; /* "View.MemoryView":1280 * cdef __Pyx_memviewslice tmp * * if src_ndim < dst_ndim: # <<<<<<<<<<<<<< * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: */ __pyx_t_2 = ((__pyx_v_src_ndim < __pyx_v_dst_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1281 * * if src_ndim < dst_ndim: * broadcast_leading(&src, src_ndim, dst_ndim) # <<<<<<<<<<<<<< * elif dst_ndim < src_ndim: * broadcast_leading(&dst, dst_ndim, src_ndim) */ __pyx_memoryview_broadcast_leading((&__pyx_v_src), __pyx_v_src_ndim, __pyx_v_dst_ndim); /* "View.MemoryView":1280 * cdef __Pyx_memviewslice tmp * * if src_ndim < dst_ndim: # <<<<<<<<<<<<<< * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: */ goto __pyx_L3; } /* "View.MemoryView":1282 * if src_ndim < dst_ndim: * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: # <<<<<<<<<<<<<< * broadcast_leading(&dst, dst_ndim, src_ndim) * */ __pyx_t_2 = ((__pyx_v_dst_ndim < __pyx_v_src_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1283 * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: * broadcast_leading(&dst, dst_ndim, src_ndim) # <<<<<<<<<<<<<< * * cdef int ndim = max(src_ndim, dst_ndim) */ __pyx_memoryview_broadcast_leading((&__pyx_v_dst), __pyx_v_dst_ndim, __pyx_v_src_ndim); /* "View.MemoryView":1282 * if src_ndim < dst_ndim: * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: # <<<<<<<<<<<<<< * broadcast_leading(&dst, dst_ndim, src_ndim) * */ } __pyx_L3:; /* "View.MemoryView":1285 * broadcast_leading(&dst, dst_ndim, src_ndim) * * cdef int ndim = max(src_ndim, dst_ndim) # <<<<<<<<<<<<<< * * for i in range(ndim): */ __pyx_t_3 = __pyx_v_dst_ndim; __pyx_t_4 = __pyx_v_src_ndim; if (((__pyx_t_3 > __pyx_t_4) != 0)) { __pyx_t_5 = __pyx_t_3; } else { __pyx_t_5 = __pyx_t_4; } __pyx_v_ndim = __pyx_t_5; /* "View.MemoryView":1287 * cdef int ndim = max(src_ndim, dst_ndim) * * for i in range(ndim): # <<<<<<<<<<<<<< * if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: */ __pyx_t_5 = __pyx_v_ndim; __pyx_t_3 = __pyx_t_5; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_i = __pyx_t_4; /* "View.MemoryView":1288 * * for i in range(ndim): * if src.shape[i] != dst.shape[i]: # <<<<<<<<<<<<<< * if src.shape[i] == 1: * broadcasting = True */ __pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) != (__pyx_v_dst.shape[__pyx_v_i])) 
!= 0); if (__pyx_t_2) { /* "View.MemoryView":1289 * for i in range(ndim): * if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: # <<<<<<<<<<<<<< * broadcasting = True * src.strides[i] = 0 */ __pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) == 1) != 0); if (__pyx_t_2) { /* "View.MemoryView":1290 * if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: * broadcasting = True # <<<<<<<<<<<<<< * src.strides[i] = 0 * else: */ __pyx_v_broadcasting = 1; /* "View.MemoryView":1291 * if src.shape[i] == 1: * broadcasting = True * src.strides[i] = 0 # <<<<<<<<<<<<<< * else: * _err_extents(i, dst.shape[i], src.shape[i]) */ (__pyx_v_src.strides[__pyx_v_i]) = 0; /* "View.MemoryView":1289 * for i in range(ndim): * if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: # <<<<<<<<<<<<<< * broadcasting = True * src.strides[i] = 0 */ goto __pyx_L7; } /* "View.MemoryView":1293 * src.strides[i] = 0 * else: * _err_extents(i, dst.shape[i], src.shape[i]) # <<<<<<<<<<<<<< * * if src.suboffsets[i] >= 0: */ /*else*/ { __pyx_t_6 = __pyx_memoryview_err_extents(__pyx_v_i, (__pyx_v_dst.shape[__pyx_v_i]), (__pyx_v_src.shape[__pyx_v_i])); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(1, 1293, __pyx_L1_error) } __pyx_L7:; /* "View.MemoryView":1288 * * for i in range(ndim): * if src.shape[i] != dst.shape[i]: # <<<<<<<<<<<<<< * if src.shape[i] == 1: * broadcasting = True */ } /* "View.MemoryView":1295 * _err_extents(i, dst.shape[i], src.shape[i]) * * if src.suboffsets[i] >= 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, "Dimension %d is not direct", i) * */ __pyx_t_2 = (((__pyx_v_src.suboffsets[__pyx_v_i]) >= 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":1296 * * if src.suboffsets[i] >= 0: * _err_dim(ValueError, "Dimension %d is not direct", i) # <<<<<<<<<<<<<< * * if slices_overlap(&src, &dst, ndim, itemsize): */ __pyx_t_6 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, ((char *)"Dimension %d is not direct"), __pyx_v_i); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(1, 1296, __pyx_L1_error) /* "View.MemoryView":1295 * _err_extents(i, dst.shape[i], src.shape[i]) * * if src.suboffsets[i] >= 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, "Dimension %d is not direct", i) * */ } } /* "View.MemoryView":1298 * _err_dim(ValueError, "Dimension %d is not direct", i) * * if slices_overlap(&src, &dst, ndim, itemsize): # <<<<<<<<<<<<<< * * if not slice_is_contig(src, order, ndim): */ __pyx_t_2 = (__pyx_slices_overlap((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize) != 0); if (__pyx_t_2) { /* "View.MemoryView":1300 * if slices_overlap(&src, &dst, ndim, itemsize): * * if not slice_is_contig(src, order, ndim): # <<<<<<<<<<<<<< * order = get_best_order(&dst, ndim) * */ __pyx_t_2 = ((!(__pyx_memviewslice_is_contig(__pyx_v_src, __pyx_v_order, __pyx_v_ndim) != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":1301 * * if not slice_is_contig(src, order, ndim): * order = get_best_order(&dst, ndim) # <<<<<<<<<<<<<< * * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) */ __pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim); /* "View.MemoryView":1300 * if slices_overlap(&src, &dst, ndim, itemsize): * * if not slice_is_contig(src, order, ndim): # <<<<<<<<<<<<<< * order = get_best_order(&dst, ndim) * */ } /* "View.MemoryView":1303 * order = get_best_order(&dst, ndim) * * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) # <<<<<<<<<<<<<< * src = tmp * */ __pyx_t_7 = __pyx_memoryview_copy_data_to_temp((&__pyx_v_src), (&__pyx_v_tmp), __pyx_v_order, __pyx_v_ndim); if (unlikely(__pyx_t_7 == ((void *)NULL))) 
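/* copy_data_to_temp is expected (assumption: standard Cython memoryview
   utility behaviour) to malloc a contiguous staging buffer for the
   overlapping-copy path and to return NULL with a MemoryError already set
   on failure; the surrounding NULL check then jumps to the error label: */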
__PYX_ERR(1, 1303, __pyx_L1_error) __pyx_v_tmpdata = __pyx_t_7; /* "View.MemoryView":1304 * * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) * src = tmp # <<<<<<<<<<<<<< * * if not broadcasting: */ __pyx_v_src = __pyx_v_tmp; /* "View.MemoryView":1298 * _err_dim(ValueError, "Dimension %d is not direct", i) * * if slices_overlap(&src, &dst, ndim, itemsize): # <<<<<<<<<<<<<< * * if not slice_is_contig(src, order, ndim): */ } /* "View.MemoryView":1306 * src = tmp * * if not broadcasting: # <<<<<<<<<<<<<< * * */ __pyx_t_2 = ((!(__pyx_v_broadcasting != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":1309 * * * if slice_is_contig(src, 'C', ndim): # <<<<<<<<<<<<<< * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): */ __pyx_t_2 = (__pyx_memviewslice_is_contig(__pyx_v_src, 'C', __pyx_v_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1310 * * if slice_is_contig(src, 'C', ndim): * direct_copy = slice_is_contig(dst, 'C', ndim) # <<<<<<<<<<<<<< * elif slice_is_contig(src, 'F', ndim): * direct_copy = slice_is_contig(dst, 'F', ndim) */ __pyx_v_direct_copy = __pyx_memviewslice_is_contig(__pyx_v_dst, 'C', __pyx_v_ndim); /* "View.MemoryView":1309 * * * if slice_is_contig(src, 'C', ndim): # <<<<<<<<<<<<<< * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): */ goto __pyx_L12; } /* "View.MemoryView":1311 * if slice_is_contig(src, 'C', ndim): * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): # <<<<<<<<<<<<<< * direct_copy = slice_is_contig(dst, 'F', ndim) * */ __pyx_t_2 = (__pyx_memviewslice_is_contig(__pyx_v_src, 'F', __pyx_v_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1312 * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): * direct_copy = slice_is_contig(dst, 'F', ndim) # <<<<<<<<<<<<<< * * if direct_copy: */ __pyx_v_direct_copy = __pyx_memviewslice_is_contig(__pyx_v_dst, 'F', __pyx_v_ndim); /* "View.MemoryView":1311 * if slice_is_contig(src, 'C', ndim): * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): # <<<<<<<<<<<<<< * direct_copy = slice_is_contig(dst, 'F', ndim) * */ } __pyx_L12:; /* "View.MemoryView":1314 * direct_copy = slice_is_contig(dst, 'F', ndim) * * if direct_copy: # <<<<<<<<<<<<<< * * refcount_copying(&dst, dtype_is_object, ndim, False) */ __pyx_t_2 = (__pyx_v_direct_copy != 0); if (__pyx_t_2) { /* "View.MemoryView":1316 * if direct_copy: * * refcount_copying(&dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<< * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) * refcount_copying(&dst, dtype_is_object, ndim, True) */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0); /* "View.MemoryView":1317 * * refcount_copying(&dst, dtype_is_object, ndim, False) * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) # <<<<<<<<<<<<<< * refcount_copying(&dst, dtype_is_object, ndim, True) * free(tmpdata) */ (void)(memcpy(__pyx_v_dst.data, __pyx_v_src.data, __pyx_memoryview_slice_get_size((&__pyx_v_src), __pyx_v_ndim))); /* "View.MemoryView":1318 * refcount_copying(&dst, dtype_is_object, ndim, False) * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) * refcount_copying(&dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<< * free(tmpdata) * return 0 */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1); /* "View.MemoryView":1319 * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) * refcount_copying(&dst, 
dtype_is_object, ndim, True) * free(tmpdata) # <<<<<<<<<<<<<< * return 0 * */ free(__pyx_v_tmpdata); /* "View.MemoryView":1320 * refcount_copying(&dst, dtype_is_object, ndim, True) * free(tmpdata) * return 0 # <<<<<<<<<<<<<< * * if order == 'F' == get_best_order(&dst, ndim): */ __pyx_r = 0; goto __pyx_L0; /* "View.MemoryView":1314 * direct_copy = slice_is_contig(dst, 'F', ndim) * * if direct_copy: # <<<<<<<<<<<<<< * * refcount_copying(&dst, dtype_is_object, ndim, False) */ } /* "View.MemoryView":1306 * src = tmp * * if not broadcasting: # <<<<<<<<<<<<<< * * */ } /* "View.MemoryView":1322 * return 0 * * if order == 'F' == get_best_order(&dst, ndim): # <<<<<<<<<<<<<< * * */ __pyx_t_2 = (__pyx_v_order == 'F'); if (__pyx_t_2) { __pyx_t_2 = ('F' == __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim)); } __pyx_t_8 = (__pyx_t_2 != 0); if (__pyx_t_8) { /* "View.MemoryView":1325 * * * transpose_memslice(&src) # <<<<<<<<<<<<<< * transpose_memslice(&dst) * */ __pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_src)); if (unlikely(__pyx_t_5 == ((int)0))) __PYX_ERR(1, 1325, __pyx_L1_error) /* "View.MemoryView":1326 * * transpose_memslice(&src) * transpose_memslice(&dst) # <<<<<<<<<<<<<< * * refcount_copying(&dst, dtype_is_object, ndim, False) */ __pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_dst)); if (unlikely(__pyx_t_5 == ((int)0))) __PYX_ERR(1, 1326, __pyx_L1_error) /* "View.MemoryView":1322 * return 0 * * if order == 'F' == get_best_order(&dst, ndim): # <<<<<<<<<<<<<< * * */ } /* "View.MemoryView":1328 * transpose_memslice(&dst) * * refcount_copying(&dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<< * copy_strided_to_strided(&src, &dst, ndim, itemsize) * refcount_copying(&dst, dtype_is_object, ndim, True) */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0); /* "View.MemoryView":1329 * * refcount_copying(&dst, dtype_is_object, ndim, False) * copy_strided_to_strided(&src, &dst, ndim, itemsize) # <<<<<<<<<<<<<< * refcount_copying(&dst, dtype_is_object, ndim, True) * */ copy_strided_to_strided((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize); /* "View.MemoryView":1330 * refcount_copying(&dst, dtype_is_object, ndim, False) * copy_strided_to_strided(&src, &dst, ndim, itemsize) * refcount_copying(&dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<< * * free(tmpdata) */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1); /* "View.MemoryView":1332 * refcount_copying(&dst, dtype_is_object, ndim, True) * * free(tmpdata) # <<<<<<<<<<<<<< * return 0 * */ free(__pyx_v_tmpdata); /* "View.MemoryView":1333 * * free(tmpdata) * return 0 # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_broadcast_leading') */ __pyx_r = 0; goto __pyx_L0; /* "View.MemoryView":1264 * * @cname('__pyx_memoryview_copy_contents') * cdef int memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<< * __Pyx_memviewslice dst, * int src_ndim, int dst_ndim, */ /* function exit code */ __pyx_L1_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_AddTraceback("View.MemoryView.memoryview_copy_contents", __pyx_clineno, __pyx_lineno, __pyx_filename); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_r = -1; __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1336 * * @cname('__pyx_memoryview_broadcast_leading') * cdef void broadcast_leading(__Pyx_memviewslice *mslice, # <<<<<<<<<<<<<< * int ndim, * int ndim_other) nogil: */ static void 
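/* broadcast_leading (definition continues below): left-pads a lower-rank
   slice with length-1 dimensions so its rank matches ndim_other. Worked
   example (illustrative only, not from the source): ndim = 1, ndim_other = 3
   turns shape (5,) into (1, 1, 5); the existing shape/strides/suboffsets are
   shifted up by offset = ndim_other - ndim, the new leading strides repeat
   strides[0], and the new leading suboffsets become -1 (direct access). */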
__pyx_memoryview_broadcast_leading(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim, int __pyx_v_ndim_other) { int __pyx_v_i; int __pyx_v_offset; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; /* "View.MemoryView":1340 * int ndim_other) nogil: * cdef int i * cdef int offset = ndim_other - ndim # <<<<<<<<<<<<<< * * for i in range(ndim - 1, -1, -1): */ __pyx_v_offset = (__pyx_v_ndim_other - __pyx_v_ndim); /* "View.MemoryView":1342 * cdef int offset = ndim_other - ndim * * for i in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< * mslice.shape[i + offset] = mslice.shape[i] * mslice.strides[i + offset] = mslice.strides[i] */ for (__pyx_t_1 = (__pyx_v_ndim - 1); __pyx_t_1 > -1; __pyx_t_1-=1) { __pyx_v_i = __pyx_t_1; /* "View.MemoryView":1343 * * for i in range(ndim - 1, -1, -1): * mslice.shape[i + offset] = mslice.shape[i] # <<<<<<<<<<<<<< * mslice.strides[i + offset] = mslice.strides[i] * mslice.suboffsets[i + offset] = mslice.suboffsets[i] */ (__pyx_v_mslice->shape[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->shape[__pyx_v_i]); /* "View.MemoryView":1344 * for i in range(ndim - 1, -1, -1): * mslice.shape[i + offset] = mslice.shape[i] * mslice.strides[i + offset] = mslice.strides[i] # <<<<<<<<<<<<<< * mslice.suboffsets[i + offset] = mslice.suboffsets[i] * */ (__pyx_v_mslice->strides[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->strides[__pyx_v_i]); /* "View.MemoryView":1345 * mslice.shape[i + offset] = mslice.shape[i] * mslice.strides[i + offset] = mslice.strides[i] * mslice.suboffsets[i + offset] = mslice.suboffsets[i] # <<<<<<<<<<<<<< * * for i in range(offset): */ (__pyx_v_mslice->suboffsets[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->suboffsets[__pyx_v_i]); } /* "View.MemoryView":1347 * mslice.suboffsets[i + offset] = mslice.suboffsets[i] * * for i in range(offset): # <<<<<<<<<<<<<< * mslice.shape[i] = 1 * mslice.strides[i] = mslice.strides[0] */ __pyx_t_1 = __pyx_v_offset; __pyx_t_2 = __pyx_t_1; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "View.MemoryView":1348 * * for i in range(offset): * mslice.shape[i] = 1 # <<<<<<<<<<<<<< * mslice.strides[i] = mslice.strides[0] * mslice.suboffsets[i] = -1 */ (__pyx_v_mslice->shape[__pyx_v_i]) = 1; /* "View.MemoryView":1349 * for i in range(offset): * mslice.shape[i] = 1 * mslice.strides[i] = mslice.strides[0] # <<<<<<<<<<<<<< * mslice.suboffsets[i] = -1 * */ (__pyx_v_mslice->strides[__pyx_v_i]) = (__pyx_v_mslice->strides[0]); /* "View.MemoryView":1350 * mslice.shape[i] = 1 * mslice.strides[i] = mslice.strides[0] * mslice.suboffsets[i] = -1 # <<<<<<<<<<<<<< * * */ (__pyx_v_mslice->suboffsets[__pyx_v_i]) = -1L; } /* "View.MemoryView":1336 * * @cname('__pyx_memoryview_broadcast_leading') * cdef void broadcast_leading(__Pyx_memviewslice *mslice, # <<<<<<<<<<<<<< * int ndim, * int ndim_other) nogil: */ /* function exit code */ } /* "View.MemoryView":1358 * * @cname('__pyx_memoryview_refcount_copying') * cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, # <<<<<<<<<<<<<< * int ndim, bint inc) nogil: * */ static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_dtype_is_object, int __pyx_v_ndim, int __pyx_v_inc) { int __pyx_t_1; /* "View.MemoryView":1362 * * * if dtype_is_object: # <<<<<<<<<<<<<< * refcount_objects_in_slice_with_gil(dst.data, dst.shape, * dst.strides, ndim, inc) */ __pyx_t_1 = (__pyx_v_dtype_is_object != 0); if (__pyx_t_1) { /* "View.MemoryView":1363 * * if dtype_is_object: * refcount_objects_in_slice_with_gil(dst.data, dst.shape, # 
<<<<<<<<<<<<<< * dst.strides, ndim, inc) * */ __pyx_memoryview_refcount_objects_in_slice_with_gil(__pyx_v_dst->data, __pyx_v_dst->shape, __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_inc); /* "View.MemoryView":1362 * * * if dtype_is_object: # <<<<<<<<<<<<<< * refcount_objects_in_slice_with_gil(dst.data, dst.shape, * dst.strides, ndim, inc) */ } /* "View.MemoryView":1358 * * @cname('__pyx_memoryview_refcount_copying') * cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, # <<<<<<<<<<<<<< * int ndim, bint inc) nogil: * */ /* function exit code */ } /* "View.MemoryView":1367 * * @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil') * cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * bint inc) with gil: */ static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) { __Pyx_RefNannyDeclarations #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_RefNannySetupContext("refcount_objects_in_slice_with_gil", 0); /* "View.MemoryView":1370 * Py_ssize_t *strides, int ndim, * bint inc) with gil: * refcount_objects_in_slice(data, shape, strides, ndim, inc) # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_refcount_objects_in_slice') */ __pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, __pyx_v_shape, __pyx_v_strides, __pyx_v_ndim, __pyx_v_inc); /* "View.MemoryView":1367 * * @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil') * cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * bint inc) with gil: */ /* function exit code */ __Pyx_RefNannyFinishContext(); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif } /* "View.MemoryView":1373 * * @cname('__pyx_memoryview_refcount_objects_in_slice') * cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, bint inc): * cdef Py_ssize_t i */ static void __pyx_memoryview_refcount_objects_in_slice(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) { CYTHON_UNUSED Py_ssize_t __pyx_v_i; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; Py_ssize_t __pyx_t_2; Py_ssize_t __pyx_t_3; int __pyx_t_4; __Pyx_RefNannySetupContext("refcount_objects_in_slice", 0); /* "View.MemoryView":1377 * cdef Py_ssize_t i * * for i in range(shape[0]): # <<<<<<<<<<<<<< * if ndim == 1: * if inc: */ __pyx_t_1 = (__pyx_v_shape[0]); __pyx_t_2 = __pyx_t_1; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "View.MemoryView":1378 * * for i in range(shape[0]): * if ndim == 1: # <<<<<<<<<<<<<< * if inc: * Py_INCREF((<PyObject **> data)[0]) */ __pyx_t_4 = ((__pyx_v_ndim == 1) != 0); if (__pyx_t_4) { /* "View.MemoryView":1379 * for i in range(shape[0]): * if ndim == 1: * if inc: # <<<<<<<<<<<<<< * Py_INCREF((<PyObject **> data)[0]) * else: */ __pyx_t_4 = (__pyx_v_inc != 0); if (__pyx_t_4) { /* "View.MemoryView":1380 * if ndim == 1: * if inc: * Py_INCREF((<PyObject **> data)[0]) # <<<<<<<<<<<<<< * else: * Py_DECREF((<PyObject **> data)[0]) */ Py_INCREF((((PyObject **)__pyx_v_data)[0])); /* "View.MemoryView":1379 * for i in range(shape[0]): * if ndim == 1: * if inc: # <<<<<<<<<<<<<< * Py_INCREF((<PyObject **> data)[0]) * else: */ goto __pyx_L6; } /* "View.MemoryView":1382 * 
Py_INCREF((<PyObject **> data)[0]) * else: * Py_DECREF((<PyObject **> data)[0]) # <<<<<<<<<<<<<< * else: * refcount_objects_in_slice(data, shape + 1, strides + 1, */ /*else*/ { Py_DECREF((((PyObject **)__pyx_v_data)[0])); } __pyx_L6:; /* "View.MemoryView":1378 * * for i in range(shape[0]): * if ndim == 1: # <<<<<<<<<<<<<< * if inc: * Py_INCREF((<PyObject **> data)[0]) */ goto __pyx_L5; } /* "View.MemoryView":1384 * Py_DECREF((<PyObject **> data)[0]) * else: * refcount_objects_in_slice(data, shape + 1, strides + 1, # <<<<<<<<<<<<<< * ndim - 1, inc) * */ /*else*/ { /* "View.MemoryView":1385 * else: * refcount_objects_in_slice(data, shape + 1, strides + 1, * ndim - 1, inc) # <<<<<<<<<<<<<< * * data += strides[0] */ __pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_inc); } __pyx_L5:; /* "View.MemoryView":1387 * ndim - 1, inc) * * data += strides[0] # <<<<<<<<<<<<<< * * */ __pyx_v_data = (__pyx_v_data + (__pyx_v_strides[0])); } /* "View.MemoryView":1373 * * @cname('__pyx_memoryview_refcount_objects_in_slice') * cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, bint inc): * cdef Py_ssize_t i */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":1393 * * @cname('__pyx_memoryview_slice_assign_scalar') * cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim, # <<<<<<<<<<<<<< * size_t itemsize, void *item, * bint dtype_is_object) nogil: */ static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item, int __pyx_v_dtype_is_object) { /* "View.MemoryView":1396 * size_t itemsize, void *item, * bint dtype_is_object) nogil: * refcount_copying(dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<< * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, * itemsize, item) */ __pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 0); /* "View.MemoryView":1397 * bint dtype_is_object) nogil: * refcount_copying(dst, dtype_is_object, ndim, False) * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, # <<<<<<<<<<<<<< * itemsize, item) * refcount_copying(dst, dtype_is_object, ndim, True) */ __pyx_memoryview__slice_assign_scalar(__pyx_v_dst->data, __pyx_v_dst->shape, __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_itemsize, __pyx_v_item); /* "View.MemoryView":1399 * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, * itemsize, item) * refcount_copying(dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<< * * */ __pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 1); /* "View.MemoryView":1393 * * @cname('__pyx_memoryview_slice_assign_scalar') * cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim, # <<<<<<<<<<<<<< * size_t itemsize, void *item, * bint dtype_is_object) nogil: */ /* function exit code */ } /* "View.MemoryView":1403 * * @cname('__pyx_memoryview__slice_assign_scalar') * cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * size_t itemsize, void *item) nogil: */ static void __pyx_memoryview__slice_assign_scalar(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item) { CYTHON_UNUSED Py_ssize_t __pyx_v_i; Py_ssize_t __pyx_v_stride; Py_ssize_t __pyx_v_extent; int __pyx_t_1; Py_ssize_t __pyx_t_2; Py_ssize_t 
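/* (note on _slice_assign_scalar, whose locals are being declared here)
   Recursion sketch: at ndim == 1 the scalar in `item` is memcpy'd into each
   of shape[0] elements, stepping by strides[0]; for higher ranks it recurses
   with shape+1/strides+1 and ndim-1, advancing `data` by strides[0] per
   sub-slice. */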
__pyx_t_3; Py_ssize_t __pyx_t_4; /* "View.MemoryView":1407 * size_t itemsize, void *item) nogil: * cdef Py_ssize_t i * cdef Py_ssize_t stride = strides[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t extent = shape[0] * */ __pyx_v_stride = (__pyx_v_strides[0]); /* "View.MemoryView":1408 * cdef Py_ssize_t i * cdef Py_ssize_t stride = strides[0] * cdef Py_ssize_t extent = shape[0] # <<<<<<<<<<<<<< * * if ndim == 1: */ __pyx_v_extent = (__pyx_v_shape[0]); /* "View.MemoryView":1410 * cdef Py_ssize_t extent = shape[0] * * if ndim == 1: # <<<<<<<<<<<<<< * for i in range(extent): * memcpy(data, item, itemsize) */ __pyx_t_1 = ((__pyx_v_ndim == 1) != 0); if (__pyx_t_1) { /* "View.MemoryView":1411 * * if ndim == 1: * for i in range(extent): # <<<<<<<<<<<<<< * memcpy(data, item, itemsize) * data += stride */ __pyx_t_2 = __pyx_v_extent; __pyx_t_3 = __pyx_t_2; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_i = __pyx_t_4; /* "View.MemoryView":1412 * if ndim == 1: * for i in range(extent): * memcpy(data, item, itemsize) # <<<<<<<<<<<<<< * data += stride * else: */ (void)(memcpy(__pyx_v_data, __pyx_v_item, __pyx_v_itemsize)); /* "View.MemoryView":1413 * for i in range(extent): * memcpy(data, item, itemsize) * data += stride # <<<<<<<<<<<<<< * else: * for i in range(extent): */ __pyx_v_data = (__pyx_v_data + __pyx_v_stride); } /* "View.MemoryView":1410 * cdef Py_ssize_t extent = shape[0] * * if ndim == 1: # <<<<<<<<<<<<<< * for i in range(extent): * memcpy(data, item, itemsize) */ goto __pyx_L3; } /* "View.MemoryView":1415 * data += stride * else: * for i in range(extent): # <<<<<<<<<<<<<< * _slice_assign_scalar(data, shape + 1, strides + 1, * ndim - 1, itemsize, item) */ /*else*/ { __pyx_t_2 = __pyx_v_extent; __pyx_t_3 = __pyx_t_2; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_i = __pyx_t_4; /* "View.MemoryView":1416 * else: * for i in range(extent): * _slice_assign_scalar(data, shape + 1, strides + 1, # <<<<<<<<<<<<<< * ndim - 1, itemsize, item) * data += stride */ __pyx_memoryview__slice_assign_scalar(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize, __pyx_v_item); /* "View.MemoryView":1418 * _slice_assign_scalar(data, shape + 1, strides + 1, * ndim - 1, itemsize, item) * data += stride # <<<<<<<<<<<<<< * * */ __pyx_v_data = (__pyx_v_data + __pyx_v_stride); } } __pyx_L3:; /* "View.MemoryView":1403 * * @cname('__pyx_memoryview__slice_assign_scalar') * cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * size_t itemsize, void *item) nogil: */ /* function exit code */ } /* "(tree fragment)":1 * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< * if __pyx_checksum != 0xb068931: * from pickle import PickleError as __pyx_PickleError */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_15View_dot_MemoryView_1__pyx_unpickle_Enum = {"__pyx_unpickle_Enum", (PyCFunction)__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v___pyx_type = 0; long __pyx_v___pyx_checksum; PyObject *__pyx_v___pyx_state = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__pyx_unpickle_Enum (wrapper)", 0); { 
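/* Generated argument unpacking for __pyx_unpickle_Enum: __pyx_type,
   __pyx_checksum and __pyx_state may arrive positionally or as keywords.
   The fall-through switch first captures positional arguments, keyword
   lookup then fills any gaps, and __Pyx_RaiseArgtupleInvalid reports
   wrong-arity calls. */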
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_pyx_type,&__pyx_n_s_pyx_checksum,&__pyx_n_s_pyx_state,0}; PyObject* values[3] = {0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_type)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_checksum)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, 1); __PYX_ERR(1, 1, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_state)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, 2); __PYX_ERR(1, 1, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__pyx_unpickle_Enum") < 0)) __PYX_ERR(1, 1, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 3) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); } __pyx_v___pyx_type = values[0]; __pyx_v___pyx_checksum = __Pyx_PyInt_As_long(values[1]); if (unlikely((__pyx_v___pyx_checksum == (long)-1) && PyErr_Occurred())) __PYX_ERR(1, 1, __pyx_L3_error) __pyx_v___pyx_state = values[2]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 1, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(__pyx_self, __pyx_v___pyx_type, __pyx_v___pyx_checksum, __pyx_v___pyx_state); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_v___pyx_PickleError = NULL; PyObject *__pyx_v___pyx_result = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; int __pyx_t_7; __Pyx_RefNannySetupContext("__pyx_unpickle_Enum", 0); /* "(tree fragment)":2 * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): * if __pyx_checksum != 0xb068931: # <<<<<<<<<<<<<< * from pickle import PickleError as __pyx_PickleError * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) */ __pyx_t_1 = ((__pyx_v___pyx_checksum != 0xb068931) != 0); if (__pyx_t_1) { /* "(tree fragment)":3 * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): * if __pyx_checksum != 0xb068931: * from pickle import PickleError as 
__pyx_PickleError # <<<<<<<<<<<<<< * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) * __pyx_result = Enum.__new__(__pyx_type) */ __pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 3, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_n_s_PickleError); __Pyx_GIVEREF(__pyx_n_s_PickleError); PyList_SET_ITEM(__pyx_t_2, 0, __pyx_n_s_PickleError); __pyx_t_3 = __Pyx_Import(__pyx_n_s_pickle, __pyx_t_2, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 3, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_ImportFrom(__pyx_t_3, __pyx_n_s_PickleError); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 3, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_t_2); __pyx_v___pyx_PickleError = __pyx_t_2; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "(tree fragment)":4 * if __pyx_checksum != 0xb068931: * from pickle import PickleError as __pyx_PickleError * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) # <<<<<<<<<<<<<< * __pyx_result = Enum.__new__(__pyx_type) * if __pyx_state is not None: */ __pyx_t_2 = __Pyx_PyInt_From_long(__pyx_v___pyx_checksum); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Incompatible_checksums_s_vs_0xb0, __pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_INCREF(__pyx_v___pyx_PickleError); __pyx_t_2 = __pyx_v___pyx_PickleError; __pyx_t_5 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_2))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_2); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_2, function); } } if (!__pyx_t_5) { __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_GOTREF(__pyx_t_3); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_2)) { PyObject *__pyx_temp[2] = {__pyx_t_5, __pyx_t_4}; __pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_2, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_2)) { PyObject *__pyx_temp[2] = {__pyx_t_5, __pyx_t_4}; __pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_2, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } else #endif { __pyx_t_6 = PyTuple_New(1+1); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_5); __pyx_t_5 = NULL; __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_6, 0+1, __pyx_t_4); __pyx_t_4 = 0; __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_6, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } } __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 4, __pyx_L1_error) /* "(tree fragment)":2 * def __pyx_unpickle_Enum(__pyx_type, long 
__pyx_checksum, __pyx_state): * if __pyx_checksum != 0xb068931: # <<<<<<<<<<<<<< * from pickle import PickleError as __pyx_PickleError * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) */ } /* "(tree fragment)":5 * from pickle import PickleError as __pyx_PickleError * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) * __pyx_result = Enum.__new__(__pyx_type) # <<<<<<<<<<<<<< * if __pyx_state is not None: * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_MemviewEnum_type), __pyx_n_s_new); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_6 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_2); if (likely(__pyx_t_6)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); __Pyx_INCREF(__pyx_t_6); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_2, function); } } if (!__pyx_t_6) { __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_v___pyx_type); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 5, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_2)) { PyObject *__pyx_temp[2] = {__pyx_t_6, __pyx_v___pyx_type}; __pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_2, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 5, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_3); } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_2)) { PyObject *__pyx_temp[2] = {__pyx_t_6, __pyx_v___pyx_type}; __pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_2, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 5, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_3); } else #endif { __pyx_t_4 = PyTuple_New(1+1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 5, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_6); __pyx_t_6 = NULL; __Pyx_INCREF(__pyx_v___pyx_type); __Pyx_GIVEREF(__pyx_v___pyx_type); PyTuple_SET_ITEM(__pyx_t_4, 0+1, __pyx_v___pyx_type); __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_4, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 5, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } } __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_v___pyx_result = __pyx_t_3; __pyx_t_3 = 0; /* "(tree fragment)":6 * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) * __pyx_result = Enum.__new__(__pyx_type) * if __pyx_state is not None: # <<<<<<<<<<<<<< * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result */ __pyx_t_1 = (__pyx_v___pyx_state != Py_None); __pyx_t_7 = (__pyx_t_1 != 0); if (__pyx_t_7) { /* "(tree fragment)":7 * __pyx_result = Enum.__new__(__pyx_type) * if __pyx_state is not None: * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) # <<<<<<<<<<<<<< * return __pyx_result * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): */ if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(1, 7, __pyx_L1_error) __pyx_t_3 = __pyx_unpickle_Enum__set_state(((struct __pyx_MemviewEnum_obj *)__pyx_v___pyx_result), ((PyObject*)__pyx_v___pyx_state)); if 
(unlikely(!__pyx_t_3)) __PYX_ERR(1, 7, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "(tree fragment)":6 * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) * __pyx_result = Enum.__new__(__pyx_type) * if __pyx_state is not None: # <<<<<<<<<<<<<< * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result */ } /* "(tree fragment)":8 * if __pyx_state is not None: * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result # <<<<<<<<<<<<<< * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): * __pyx_result.name = __pyx_state[0] */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v___pyx_result); __pyx_r = __pyx_v___pyx_result; goto __pyx_L0; /* "(tree fragment)":1 * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< * if __pyx_checksum != 0xb068931: * from pickle import PickleError as __pyx_PickleError */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v___pyx_PickleError); __Pyx_XDECREF(__pyx_v___pyx_result); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":9 * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<< * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): */ static PyObject *__pyx_unpickle_Enum__set_state(struct __pyx_MemviewEnum_obj *__pyx_v___pyx_result, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; Py_ssize_t __pyx_t_3; int __pyx_t_4; int __pyx_t_5; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; PyObject *__pyx_t_9 = NULL; __Pyx_RefNannySetupContext("__pyx_unpickle_Enum__set_state", 0); /* "(tree fragment)":10 * return __pyx_result * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): * __pyx_result.name = __pyx_state[0] # <<<<<<<<<<<<<< * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): * __pyx_result.__dict__.update(__pyx_state[1]) */ if (unlikely(__pyx_v___pyx_state == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); __PYX_ERR(1, 10, __pyx_L1_error) } __pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 10, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __Pyx_GOTREF(__pyx_v___pyx_result->name); __Pyx_DECREF(__pyx_v___pyx_result->name); __pyx_v___pyx_result->name = __pyx_t_1; __pyx_t_1 = 0; /* "(tree fragment)":11 * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<< * __pyx_result.__dict__.update(__pyx_state[1]) */ if (unlikely(__pyx_v___pyx_state == Py_None)) { PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()"); __PYX_ERR(1, 11, __pyx_L1_error) } __pyx_t_3 = PyTuple_GET_SIZE(__pyx_v___pyx_state); if (unlikely(__pyx_t_3 == 
((Py_ssize_t)-1))) __PYX_ERR(1, 11, __pyx_L1_error) __pyx_t_4 = ((__pyx_t_3 > 1) != 0); if (__pyx_t_4) { } else { __pyx_t_2 = __pyx_t_4; goto __pyx_L4_bool_binop_done; } __pyx_t_4 = __Pyx_HasAttr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 11, __pyx_L1_error) __pyx_t_5 = (__pyx_t_4 != 0); __pyx_t_2 = __pyx_t_5; __pyx_L4_bool_binop_done:; if (__pyx_t_2) { /* "(tree fragment)":12 * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): * __pyx_result.__dict__.update(__pyx_state[1]) # <<<<<<<<<<<<<< */ __pyx_t_6 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 12, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_update); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 12, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (unlikely(__pyx_v___pyx_state == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); __PYX_ERR(1, 12, __pyx_L1_error) } __pyx_t_6 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 12, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_8 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_7))) { __pyx_t_8 = PyMethod_GET_SELF(__pyx_t_7); if (likely(__pyx_t_8)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7); __Pyx_INCREF(__pyx_t_8); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_7, function); } } if (!__pyx_t_8) { __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_6); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_1); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[2] = {__pyx_t_8, __pyx_t_6}; __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[2] = {__pyx_t_8, __pyx_t_6}; __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } else #endif { __pyx_t_9 = PyTuple_New(1+1); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 12, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_GIVEREF(__pyx_t_8); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_8); __pyx_t_8 = NULL; __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_9, 0+1, __pyx_t_6); __pyx_t_6 = 0; __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_9, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } } __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "(tree fragment)":11 * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<< * __pyx_result.__dict__.update(__pyx_state[1]) */ } /* "(tree fragment)":9 * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result * cdef __pyx_unpickle_Enum__set_state(Enum 
__pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<< * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_XDECREF(__pyx_t_9); __Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum__set_state", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } static struct __pyx_vtabstruct_array __pyx_vtable_array; static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k) { struct __pyx_array_obj *p; PyObject *o; if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { o = (*t->tp_alloc)(t, 0); } else { o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); } if (unlikely(!o)) return 0; p = ((struct __pyx_array_obj *)o); p->__pyx_vtab = __pyx_vtabptr_array; p->mode = ((PyObject*)Py_None); Py_INCREF(Py_None); p->_format = ((PyObject*)Py_None); Py_INCREF(Py_None); if (unlikely(__pyx_array___cinit__(o, a, k) < 0)) goto bad; return o; bad: Py_DECREF(o); o = 0; return NULL; } static void __pyx_tp_dealloc_array(PyObject *o) { struct __pyx_array_obj *p = (struct __pyx_array_obj *)o; #if CYTHON_USE_TP_FINALIZE if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && (!PyType_IS_GC(Py_TYPE(o)) || !_PyGC_FINALIZED(o))) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif { PyObject *etype, *eval, *etb; PyErr_Fetch(&etype, &eval, &etb); ++Py_REFCNT(o); __pyx_array___dealloc__(o); --Py_REFCNT(o); PyErr_Restore(etype, eval, etb); } Py_CLEAR(p->mode); Py_CLEAR(p->_format); (*Py_TYPE(o)->tp_free)(o); } static PyObject *__pyx_sq_item_array(PyObject *o, Py_ssize_t i) { PyObject *r; PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0; r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x); Py_DECREF(x); return r; } static int __pyx_mp_ass_subscript_array(PyObject *o, PyObject *i, PyObject *v) { if (v) { return __pyx_array___setitem__(o, i, v); } else { PyErr_Format(PyExc_NotImplementedError, "Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name); return -1; } } static PyObject *__pyx_tp_getattro_array(PyObject *o, PyObject *n) { PyObject *v = __Pyx_PyObject_GenericGetAttr(o, n); if (!v && PyErr_ExceptionMatches(PyExc_AttributeError)) { PyErr_Clear(); v = __pyx_array___getattr__(o, n); } return v; } static PyObject *__pyx_getprop___pyx_array_memview(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(o); } static PyMethodDef __pyx_methods_array[] = { {"__getattr__", (PyCFunction)__pyx_array___getattr__, METH_O|METH_COEXIST, 0}, {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_array_1__reduce_cython__, METH_NOARGS, 0}, {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_array_3__setstate_cython__, METH_O, 0}, {0, 0, 0, 0} }; static struct PyGetSetDef __pyx_getsets_array[] = { {(char *)"memview", __pyx_getprop___pyx_array_memview, 0, (char *)0, 0}, {0, 0, 0, 0, 0} }; static PySequenceMethods __pyx_tp_as_sequence_array = { __pyx_array___len__, /*sq_length*/ 0, /*sq_concat*/ 0, /*sq_repeat*/ __pyx_sq_item_array, /*sq_item*/ 0, /*sq_slice*/ 0, /*sq_ass_item*/ 0, /*sq_ass_slice*/ 0, /*sq_contains*/ 0, /*sq_inplace_concat*/ 0, /*sq_inplace_repeat*/ }; static PyMappingMethods __pyx_tp_as_mapping_array = { __pyx_array___len__, /*mp_length*/ 
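/* a[i] and a[i] = v are routed through the generated __getitem__/__setitem__
   below; deletion (v == NULL) is rejected by __pyx_mp_ass_subscript_array
   above with NotImplementedError. */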
__pyx_array___getitem__, /*mp_subscript*/ __pyx_mp_ass_subscript_array, /*mp_ass_subscript*/ }; static PyBufferProcs __pyx_tp_as_buffer_array = { #if PY_MAJOR_VERSION < 3 0, /*bf_getreadbuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getwritebuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getsegcount*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getcharbuffer*/ #endif __pyx_array_getbuffer, /*bf_getbuffer*/ 0, /*bf_releasebuffer*/ }; static PyTypeObject __pyx_type___pyx_array = { PyVarObject_HEAD_INIT(0, 0) "nescient.crypto.chacha.array", /*tp_name*/ sizeof(struct __pyx_array_obj), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc_array, /*tp_dealloc*/ 0, /*tp_print*/ 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif 0, /*tp_repr*/ 0, /*tp_as_number*/ &__pyx_tp_as_sequence_array, /*tp_as_sequence*/ &__pyx_tp_as_mapping_array, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ 0, /*tp_str*/ __pyx_tp_getattro_array, /*tp_getattro*/ 0, /*tp_setattro*/ &__pyx_tp_as_buffer_array, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE, /*tp_flags*/ 0, /*tp_doc*/ 0, /*tp_traverse*/ 0, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods_array, /*tp_methods*/ 0, /*tp_members*/ __pyx_getsets_array, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new_array, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif }; static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) { struct __pyx_MemviewEnum_obj *p; PyObject *o; if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { o = (*t->tp_alloc)(t, 0); } else { o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); } if (unlikely(!o)) return 0; p = ((struct __pyx_MemviewEnum_obj *)o); p->name = Py_None; Py_INCREF(Py_None); return o; } static void __pyx_tp_dealloc_Enum(PyObject *o) { struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; #if CYTHON_USE_TP_FINALIZE if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif PyObject_GC_UnTrack(o); Py_CLEAR(p->name); (*Py_TYPE(o)->tp_free)(o); } static int __pyx_tp_traverse_Enum(PyObject *o, visitproc v, void *a) { int e; struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; if (p->name) { e = (*v)(p->name, a); if (e) return e; } return 0; } static int __pyx_tp_clear_Enum(PyObject *o) { PyObject* tmp; struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; tmp = ((PyObject*)p->name); p->name = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); return 0; } static PyMethodDef __pyx_methods_Enum[] = { {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_MemviewEnum_1__reduce_cython__, METH_NOARGS, 0}, {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_MemviewEnum_3__setstate_cython__, METH_O, 0}, {0, 0, 0, 0} }; static PyTypeObject __pyx_type___pyx_MemviewEnum = { PyVarObject_HEAD_INIT(0, 0) "nescient.crypto.chacha.Enum", /*tp_name*/ sizeof(struct __pyx_MemviewEnum_obj), /*tp_basicsize*/ 0, /*tp_itemsize*/ 
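/* Enum instances are GC-tracked (Py_TPFLAGS_HAVE_GC is set in tp_flags
   below): tp_dealloc untracks the object before dropping p->name, while
   tp_traverse visits and tp_clear resets that single PyObject* field. */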
__pyx_tp_dealloc_Enum, /*tp_dealloc*/ 0, /*tp_print*/ 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif __pyx_MemviewEnum___repr__, /*tp_repr*/ 0, /*tp_as_number*/ 0, /*tp_as_sequence*/ 0, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ 0, /*tp_str*/ 0, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ 0, /*tp_doc*/ __pyx_tp_traverse_Enum, /*tp_traverse*/ __pyx_tp_clear_Enum, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods_Enum, /*tp_methods*/ 0, /*tp_members*/ 0, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ __pyx_MemviewEnum___init__, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new_Enum, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif }; static struct __pyx_vtabstruct_memoryview __pyx_vtable_memoryview; static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k) { struct __pyx_memoryview_obj *p; PyObject *o; if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { o = (*t->tp_alloc)(t, 0); } else { o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); } if (unlikely(!o)) return 0; p = ((struct __pyx_memoryview_obj *)o); p->__pyx_vtab = __pyx_vtabptr_memoryview; p->obj = Py_None; Py_INCREF(Py_None); p->_size = Py_None; Py_INCREF(Py_None); p->_array_interface = Py_None; Py_INCREF(Py_None); p->view.obj = NULL; if (unlikely(__pyx_memoryview___cinit__(o, a, k) < 0)) goto bad; return o; bad: Py_DECREF(o); o = 0; return NULL; } static void __pyx_tp_dealloc_memoryview(PyObject *o) { struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; #if CYTHON_USE_TP_FINALIZE if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif PyObject_GC_UnTrack(o); { PyObject *etype, *eval, *etb; PyErr_Fetch(&etype, &eval, &etb); ++Py_REFCNT(o); __pyx_memoryview___dealloc__(o); --Py_REFCNT(o); PyErr_Restore(etype, eval, etb); } Py_CLEAR(p->obj); Py_CLEAR(p->_size); Py_CLEAR(p->_array_interface); (*Py_TYPE(o)->tp_free)(o); } static int __pyx_tp_traverse_memoryview(PyObject *o, visitproc v, void *a) { int e; struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; if (p->obj) { e = (*v)(p->obj, a); if (e) return e; } if (p->_size) { e = (*v)(p->_size, a); if (e) return e; } if (p->_array_interface) { e = (*v)(p->_array_interface, a); if (e) return e; } if (p->view.obj) { e = (*v)(p->view.obj, a); if (e) return e; } return 0; } static int __pyx_tp_clear_memoryview(PyObject *o) { PyObject* tmp; struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; tmp = ((PyObject*)p->obj); p->obj = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); tmp = ((PyObject*)p->_size); p->_size = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); tmp = ((PyObject*)p->_array_interface); p->_array_interface = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); Py_CLEAR(p->view.obj); return 0; } static PyObject *__pyx_sq_item_memoryview(PyObject *o, Py_ssize_t i) { PyObject *r; PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0; r = 
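/* sq_item shim: the C index was boxed with PyInt_FromSsize_t above, and the
   lookup is delegated to mp_subscript so o[i] shares the mapping path: */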
Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x); Py_DECREF(x); return r; } static int __pyx_mp_ass_subscript_memoryview(PyObject *o, PyObject *i, PyObject *v) { if (v) { return __pyx_memoryview___setitem__(o, i, v); } else { PyErr_Format(PyExc_NotImplementedError, "Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name); return -1; } } static PyObject *__pyx_getprop___pyx_memoryview_T(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_base(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_shape(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_strides(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_suboffsets(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_ndim(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_itemsize(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_nbytes(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_size(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(o); } static PyMethodDef __pyx_methods_memoryview[] = { {"is_c_contig", (PyCFunction)__pyx_memoryview_is_c_contig, METH_NOARGS, 0}, {"is_f_contig", (PyCFunction)__pyx_memoryview_is_f_contig, METH_NOARGS, 0}, {"copy", (PyCFunction)__pyx_memoryview_copy, METH_NOARGS, 0}, {"copy_fortran", (PyCFunction)__pyx_memoryview_copy_fortran, METH_NOARGS, 0}, {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_memoryview_1__reduce_cython__, METH_NOARGS, 0}, {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_memoryview_3__setstate_cython__, METH_O, 0}, {0, 0, 0, 0} }; static struct PyGetSetDef __pyx_getsets_memoryview[] = { {(char *)"T", __pyx_getprop___pyx_memoryview_T, 0, (char *)0, 0}, {(char *)"base", __pyx_getprop___pyx_memoryview_base, 0, (char *)0, 0}, {(char *)"shape", __pyx_getprop___pyx_memoryview_shape, 0, (char *)0, 0}, {(char *)"strides", __pyx_getprop___pyx_memoryview_strides, 0, (char *)0, 0}, {(char *)"suboffsets", __pyx_getprop___pyx_memoryview_suboffsets, 0, (char *)0, 0}, {(char *)"ndim", __pyx_getprop___pyx_memoryview_ndim, 0, (char *)0, 0}, {(char *)"itemsize", __pyx_getprop___pyx_memoryview_itemsize, 0, (char *)0, 0}, {(char *)"nbytes", __pyx_getprop___pyx_memoryview_nbytes, 0, (char *)0, 0}, {(char *)"size", __pyx_getprop___pyx_memoryview_size, 0, (char *)0, 0}, {0, 0, 0, 0, 0} }; static PySequenceMethods __pyx_tp_as_sequence_memoryview = { __pyx_memoryview___len__, /*sq_length*/ 0, /*sq_concat*/ 0, /*sq_repeat*/ __pyx_sq_item_memoryview, /*sq_item*/ 0, /*sq_slice*/ 0, /*sq_ass_item*/ 0, /*sq_ass_slice*/ 0, /*sq_contains*/ 0, /*sq_inplace_concat*/ 0, /*sq_inplace_repeat*/ }; static PyMappingMethods __pyx_tp_as_mapping_memoryview = { __pyx_memoryview___len__, /*mp_length*/ 
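/* len(mv), mv[index] and mv[index] = value go through the generated
   __len__/__getitem__/__setitem__; as with the array type, slice deletion
   is rejected in __pyx_mp_ass_subscript_memoryview above. */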
__pyx_memoryview___getitem__, /*mp_subscript*/ __pyx_mp_ass_subscript_memoryview, /*mp_ass_subscript*/ }; static PyBufferProcs __pyx_tp_as_buffer_memoryview = { #if PY_MAJOR_VERSION < 3 0, /*bf_getreadbuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getwritebuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getsegcount*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getcharbuffer*/ #endif __pyx_memoryview_getbuffer, /*bf_getbuffer*/ 0, /*bf_releasebuffer*/ }; static PyTypeObject __pyx_type___pyx_memoryview = { PyVarObject_HEAD_INIT(0, 0) "nescient.crypto.chacha.memoryview", /*tp_name*/ sizeof(struct __pyx_memoryview_obj), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc_memoryview, /*tp_dealloc*/ 0, /*tp_print*/ 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif __pyx_memoryview___repr__, /*tp_repr*/ 0, /*tp_as_number*/ &__pyx_tp_as_sequence_memoryview, /*tp_as_sequence*/ &__pyx_tp_as_mapping_memoryview, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ __pyx_memoryview___str__, /*tp_str*/ 0, /*tp_getattro*/ 0, /*tp_setattro*/ &__pyx_tp_as_buffer_memoryview, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ 0, /*tp_doc*/ __pyx_tp_traverse_memoryview, /*tp_traverse*/ __pyx_tp_clear_memoryview, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods_memoryview, /*tp_methods*/ 0, /*tp_members*/ __pyx_getsets_memoryview, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new_memoryview, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif }; static struct __pyx_vtabstruct__memoryviewslice __pyx_vtable__memoryviewslice; static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k) { struct __pyx_memoryviewslice_obj *p; PyObject *o = __pyx_tp_new_memoryview(t, a, k); if (unlikely(!o)) return 0; p = ((struct __pyx_memoryviewslice_obj *)o); p->__pyx_base.__pyx_vtab = (struct __pyx_vtabstruct_memoryview*)__pyx_vtabptr__memoryviewslice; p->from_object = Py_None; Py_INCREF(Py_None); p->from_slice.memview = NULL; return o; } static void __pyx_tp_dealloc__memoryviewslice(PyObject *o) { struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; #if CYTHON_USE_TP_FINALIZE if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif PyObject_GC_UnTrack(o); { PyObject *etype, *eval, *etb; PyErr_Fetch(&etype, &eval, &etb); ++Py_REFCNT(o); __pyx_memoryviewslice___dealloc__(o); --Py_REFCNT(o); PyErr_Restore(etype, eval, etb); } Py_CLEAR(p->from_object); PyObject_GC_Track(o); __pyx_tp_dealloc_memoryview(o); } static int __pyx_tp_traverse__memoryviewslice(PyObject *o, visitproc v, void *a) { int e; struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; e = __pyx_tp_traverse_memoryview(o, v, a); if (e) return e; if (p->from_object) { e = (*v)(p->from_object, a); if (e) return e; } return 0; } static int __pyx_tp_clear__memoryviewslice(PyObject *o) { PyObject* tmp; struct __pyx_memoryviewslice_obj *p = (struct 
__pyx_memoryviewslice_obj *)o; __pyx_tp_clear_memoryview(o); tmp = ((PyObject*)p->from_object); p->from_object = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); __PYX_XDEC_MEMVIEW(&p->from_slice, 1); return 0; } static PyObject *__pyx_getprop___pyx_memoryviewslice_base(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(o); } static PyMethodDef __pyx_methods__memoryviewslice[] = { {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_memoryviewslice_1__reduce_cython__, METH_NOARGS, 0}, {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_memoryviewslice_3__setstate_cython__, METH_O, 0}, {0, 0, 0, 0} }; static struct PyGetSetDef __pyx_getsets__memoryviewslice[] = { {(char *)"base", __pyx_getprop___pyx_memoryviewslice_base, 0, (char *)0, 0}, {0, 0, 0, 0, 0} }; static PyTypeObject __pyx_type___pyx_memoryviewslice = { PyVarObject_HEAD_INIT(0, 0) "nescient.crypto.chacha._memoryviewslice", /*tp_name*/ sizeof(struct __pyx_memoryviewslice_obj), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc__memoryviewslice, /*tp_dealloc*/ 0, /*tp_print*/ 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif #if CYTHON_COMPILING_IN_PYPY __pyx_memoryview___repr__, /*tp_repr*/ #else 0, /*tp_repr*/ #endif 0, /*tp_as_number*/ 0, /*tp_as_sequence*/ 0, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ #if CYTHON_COMPILING_IN_PYPY __pyx_memoryview___str__, /*tp_str*/ #else 0, /*tp_str*/ #endif 0, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ "Internal class for passing memoryview slices to Python", /*tp_doc*/ __pyx_tp_traverse__memoryviewslice, /*tp_traverse*/ __pyx_tp_clear__memoryviewslice, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods__memoryviewslice, /*tp_methods*/ 0, /*tp_members*/ __pyx_getsets__memoryviewslice, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new__memoryviewslice, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif }; static PyMethodDef __pyx_methods[] = { {0, 0, 0, 0} }; #if PY_MAJOR_VERSION >= 3 #if CYTHON_PEP489_MULTI_PHASE_INIT static PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def); /*proto*/ static int __pyx_pymod_exec_chacha(PyObject* module); /*proto*/ static PyModuleDef_Slot __pyx_moduledef_slots[] = { {Py_mod_create, (void*)__pyx_pymod_create}, {Py_mod_exec, (void*)__pyx_pymod_exec_chacha}, {0, NULL} }; #endif static struct PyModuleDef __pyx_moduledef = { PyModuleDef_HEAD_INIT, "chacha", __pyx_k_Classes_and_Cython_functions_fo, /* m_doc */ #if CYTHON_PEP489_MULTI_PHASE_INIT 0, /* m_size */ #else -1, /* m_size */ #endif __pyx_methods /* m_methods */, #if CYTHON_PEP489_MULTI_PHASE_INIT __pyx_moduledef_slots, /* m_slots */ #else NULL, /* m_reload */ #endif NULL, /* m_traverse */ NULL, /* m_clear */ NULL /* m_free */ }; #endif static __Pyx_StringTabEntry __pyx_string_tab[] = { {&__pyx_n_s_ASCII, __pyx_k_ASCII, sizeof(__pyx_k_ASCII), 0, 0, 1, 1}, {&__pyx_kp_s_A_Crypter_object_used_for_encry, __pyx_k_A_Crypter_object_used_for_encry, 
sizeof(__pyx_k_A_Crypter_object_used_for_encry), 0, 0, 1, 0}, {&__pyx_kp_s_Buffer_view_does_not_expose_stri, __pyx_k_Buffer_view_does_not_expose_stri, sizeof(__pyx_k_Buffer_view_does_not_expose_stri), 0, 0, 1, 0}, {&__pyx_kp_s_Can_only_create_a_buffer_that_is, __pyx_k_Can_only_create_a_buffer_that_is, sizeof(__pyx_k_Can_only_create_a_buffer_that_is), 0, 0, 1, 0}, {&__pyx_kp_s_Cannot_assign_to_read_only_memor, __pyx_k_Cannot_assign_to_read_only_memor, sizeof(__pyx_k_Cannot_assign_to_read_only_memor), 0, 0, 1, 0}, {&__pyx_kp_s_Cannot_create_writable_memory_vi, __pyx_k_Cannot_create_writable_memory_vi, sizeof(__pyx_k_Cannot_create_writable_memory_vi), 0, 0, 1, 0}, {&__pyx_kp_s_Cannot_index_with_type_s, __pyx_k_Cannot_index_with_type_s, sizeof(__pyx_k_Cannot_index_with_type_s), 0, 0, 1, 0}, {&__pyx_n_s_ChaChaCrypter, __pyx_k_ChaChaCrypter, sizeof(__pyx_k_ChaChaCrypter), 0, 0, 1, 1}, {&__pyx_n_s_ChaChaCrypter___init, __pyx_k_ChaChaCrypter___init, sizeof(__pyx_k_ChaChaCrypter___init), 0, 0, 1, 1}, {&__pyx_n_s_ChaChaCrypter__chacha_task, __pyx_k_ChaChaCrypter__chacha_task, sizeof(__pyx_k_ChaChaCrypter__chacha_task), 0, 0, 1, 1}, {&__pyx_n_s_ChaChaCrypter_chacha_encrypt, __pyx_k_ChaChaCrypter_chacha_encrypt, sizeof(__pyx_k_ChaChaCrypter_chacha_encrypt), 0, 0, 1, 1}, {&__pyx_n_s_Ellipsis, __pyx_k_Ellipsis, sizeof(__pyx_k_Ellipsis), 0, 0, 1, 1}, {&__pyx_kp_s_Empty_shape_tuple_for_cython_arr, __pyx_k_Empty_shape_tuple_for_cython_arr, sizeof(__pyx_k_Empty_shape_tuple_for_cython_arr), 0, 0, 1, 0}, {&__pyx_kp_s_Incompatible_checksums_s_vs_0xb0, __pyx_k_Incompatible_checksums_s_vs_0xb0, sizeof(__pyx_k_Incompatible_checksums_s_vs_0xb0), 0, 0, 1, 0}, {&__pyx_n_s_IndexError, __pyx_k_IndexError, sizeof(__pyx_k_IndexError), 0, 0, 1, 1}, {&__pyx_kp_s_Indirect_dimensions_not_supporte, __pyx_k_Indirect_dimensions_not_supporte, sizeof(__pyx_k_Indirect_dimensions_not_supporte), 0, 0, 1, 0}, {&__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_k_Invalid_mode_expected_c_or_fortr, sizeof(__pyx_k_Invalid_mode_expected_c_or_fortr), 0, 0, 1, 0}, {&__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_k_Invalid_shape_in_axis_d_d, sizeof(__pyx_k_Invalid_shape_in_axis_d_d), 0, 0, 1, 0}, {&__pyx_n_s_MemoryError, __pyx_k_MemoryError, sizeof(__pyx_k_MemoryError), 0, 0, 1, 1}, {&__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_k_MemoryView_of_r_at_0x_x, sizeof(__pyx_k_MemoryView_of_r_at_0x_x), 0, 0, 1, 0}, {&__pyx_kp_s_MemoryView_of_r_object, __pyx_k_MemoryView_of_r_object, sizeof(__pyx_k_MemoryView_of_r_object), 0, 0, 1, 0}, {&__pyx_n_b_O, __pyx_k_O, sizeof(__pyx_k_O), 0, 0, 0, 1}, {&__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_k_Out_of_bounds_on_buffer_access_a, sizeof(__pyx_k_Out_of_bounds_on_buffer_access_a), 0, 0, 1, 0}, {&__pyx_n_s_PickleError, __pyx_k_PickleError, sizeof(__pyx_k_PickleError), 0, 0, 1, 1}, {&__pyx_n_s_Process, __pyx_k_Process, sizeof(__pyx_k_Process), 0, 0, 1, 1}, {&__pyx_n_s_RawArray, __pyx_k_RawArray, sizeof(__pyx_k_RawArray), 0, 0, 1, 1}, {&__pyx_n_s_TypeError, __pyx_k_TypeError, sizeof(__pyx_k_TypeError), 0, 0, 1, 1}, {&__pyx_kp_s_Unable_to_convert_item_to_object, __pyx_k_Unable_to_convert_item_to_object, sizeof(__pyx_k_Unable_to_convert_item_to_object), 0, 0, 1, 0}, {&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1}, {&__pyx_n_s_View_MemoryView, __pyx_k_View_MemoryView, sizeof(__pyx_k_View_MemoryView), 0, 0, 1, 1}, {&__pyx_n_s_active_children, __pyx_k_active_children, sizeof(__pyx_k_active_children), 0, 0, 1, 1}, {&__pyx_n_s_allocate_buffer, __pyx_k_allocate_buffer, 
sizeof(__pyx_k_allocate_buffer), 0, 0, 1, 1}, {&__pyx_n_s_auth, __pyx_k_auth, sizeof(__pyx_k_auth), 0, 0, 1, 1}, {&__pyx_n_s_base, __pyx_k_base, sizeof(__pyx_k_base), 0, 0, 1, 1}, {&__pyx_n_s_big, __pyx_k_big, sizeof(__pyx_k_big), 0, 0, 1, 1}, {&__pyx_n_s_blocks_per_chunk, __pyx_k_blocks_per_chunk, sizeof(__pyx_k_blocks_per_chunk), 0, 0, 1, 1}, {&__pyx_n_s_buffer, __pyx_k_buffer, sizeof(__pyx_k_buffer), 0, 0, 1, 1}, {&__pyx_n_s_byteorder, __pyx_k_byteorder, sizeof(__pyx_k_byteorder), 0, 0, 1, 1}, {&__pyx_n_s_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 0, 1, 1}, {&__pyx_n_u_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 1, 0, 1}, {&__pyx_n_s_c_ubyte, __pyx_k_c_ubyte, sizeof(__pyx_k_c_ubyte), 0, 0, 1, 1}, {&__pyx_n_s_ccount, __pyx_k_ccount, sizeof(__pyx_k_ccount), 0, 0, 1, 1}, {&__pyx_n_s_chacha_decrypt, __pyx_k_chacha_decrypt, sizeof(__pyx_k_chacha_decrypt), 0, 0, 1, 1}, {&__pyx_n_s_chacha_encrypt, __pyx_k_chacha_encrypt, sizeof(__pyx_k_chacha_encrypt), 0, 0, 1, 1}, {&__pyx_n_s_chacha_task, __pyx_k_chacha_task, sizeof(__pyx_k_chacha_task), 0, 0, 1, 1}, {&__pyx_n_s_chunk_size, __pyx_k_chunk_size, sizeof(__pyx_k_chunk_size), 0, 0, 1, 1}, {&__pyx_n_s_class, __pyx_k_class, sizeof(__pyx_k_class), 0, 0, 1, 1}, {&__pyx_n_s_cline_in_traceback, __pyx_k_cline_in_traceback, sizeof(__pyx_k_cline_in_traceback), 0, 0, 1, 1}, {&__pyx_kp_s_contiguous_and_direct, __pyx_k_contiguous_and_direct, sizeof(__pyx_k_contiguous_and_direct), 0, 0, 1, 0}, {&__pyx_kp_s_contiguous_and_indirect, __pyx_k_contiguous_and_indirect, sizeof(__pyx_k_contiguous_and_indirect), 0, 0, 1, 0}, {&__pyx_n_s_count, __pyx_k_count, sizeof(__pyx_k_count), 0, 0, 1, 1}, {&__pyx_n_s_cpu_count, __pyx_k_cpu_count, sizeof(__pyx_k_cpu_count), 0, 0, 1, 1}, {&__pyx_n_s_ctypes, __pyx_k_ctypes, sizeof(__pyx_k_ctypes), 0, 0, 1, 1}, {&__pyx_n_s_data, __pyx_k_data, sizeof(__pyx_k_data), 0, 0, 1, 1}, {&__pyx_n_s_dict, __pyx_k_dict, sizeof(__pyx_k_dict), 0, 0, 1, 1}, {&__pyx_n_s_doc, __pyx_k_doc, sizeof(__pyx_k_doc), 0, 0, 1, 1}, {&__pyx_n_s_dtype_is_object, __pyx_k_dtype_is_object, sizeof(__pyx_k_dtype_is_object), 0, 0, 1, 1}, {&__pyx_n_s_encode, __pyx_k_encode, sizeof(__pyx_k_encode), 0, 0, 1, 1}, {&__pyx_n_s_enumerate, __pyx_k_enumerate, sizeof(__pyx_k_enumerate), 0, 0, 1, 1}, {&__pyx_n_s_error, __pyx_k_error, sizeof(__pyx_k_error), 0, 0, 1, 1}, {&__pyx_n_s_flags, __pyx_k_flags, sizeof(__pyx_k_flags), 0, 0, 1, 1}, {&__pyx_n_s_force_single_thread, __pyx_k_force_single_thread, sizeof(__pyx_k_force_single_thread), 0, 0, 1, 1}, {&__pyx_n_s_format, __pyx_k_format, sizeof(__pyx_k_format), 0, 0, 1, 1}, {&__pyx_n_s_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 0, 1, 1}, {&__pyx_n_u_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 1, 0, 1}, {&__pyx_n_s_getstate, __pyx_k_getstate, sizeof(__pyx_k_getstate), 0, 0, 1, 1}, {&__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_k_got_differing_extents_in_dimensi, sizeof(__pyx_k_got_differing_extents_in_dimensi), 0, 0, 1, 0}, {&__pyx_n_s_i, __pyx_k_i, sizeof(__pyx_k_i), 0, 0, 1, 1}, {&__pyx_n_s_id, __pyx_k_id, sizeof(__pyx_k_id), 0, 0, 1, 1}, {&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1}, {&__pyx_n_s_init, __pyx_k_init, sizeof(__pyx_k_init), 0, 0, 1, 1}, {&__pyx_n_s_itemsize, __pyx_k_itemsize, sizeof(__pyx_k_itemsize), 0, 0, 1, 1}, {&__pyx_kp_s_itemsize_0_for_cython_array, __pyx_k_itemsize_0_for_cython_array, sizeof(__pyx_k_itemsize_0_for_cython_array), 0, 0, 1, 0}, {&__pyx_n_s_key, __pyx_k_key, sizeof(__pyx_k_key), 0, 0, 1, 1}, {&__pyx_n_s_key_w, __pyx_k_key_w, sizeof(__pyx_k_key_w), 0, 0, 1, 1}, 
{&__pyx_n_s_l, __pyx_k_l, sizeof(__pyx_k_l), 0, 0, 1, 1}, {&__pyx_n_s_little, __pyx_k_little, sizeof(__pyx_k_little), 0, 0, 1, 1}, {&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1}, {&__pyx_n_s_memview, __pyx_k_memview, sizeof(__pyx_k_memview), 0, 0, 1, 1}, {&__pyx_n_s_metaclass, __pyx_k_metaclass, sizeof(__pyx_k_metaclass), 0, 0, 1, 1}, {&__pyx_n_s_mode, __pyx_k_mode, sizeof(__pyx_k_mode), 0, 0, 1, 1}, {&__pyx_n_s_modes, __pyx_k_modes, sizeof(__pyx_k_modes), 0, 0, 1, 1}, {&__pyx_n_s_module, __pyx_k_module, sizeof(__pyx_k_module), 0, 0, 1, 1}, {&__pyx_n_s_multiprocessing, __pyx_k_multiprocessing, sizeof(__pyx_k_multiprocessing), 0, 0, 1, 1}, {&__pyx_n_s_multiprocessing_sharedctypes, __pyx_k_multiprocessing_sharedctypes, sizeof(__pyx_k_multiprocessing_sharedctypes), 0, 0, 1, 1}, {&__pyx_n_s_n_threads, __pyx_k_n_threads, sizeof(__pyx_k_n_threads), 0, 0, 1, 1}, {&__pyx_n_s_name, __pyx_k_name, sizeof(__pyx_k_name), 0, 0, 1, 1}, {&__pyx_n_s_name_2, __pyx_k_name_2, sizeof(__pyx_k_name_2), 0, 0, 1, 1}, {&__pyx_n_s_ndim, __pyx_k_ndim, sizeof(__pyx_k_ndim), 0, 0, 1, 1}, {&__pyx_n_s_nescient_crypto_chacha, __pyx_k_nescient_crypto_chacha, sizeof(__pyx_k_nescient_crypto_chacha), 0, 0, 1, 1}, {&__pyx_kp_s_nescient_crypto_chacha_pyx, __pyx_k_nescient_crypto_chacha_pyx, sizeof(__pyx_k_nescient_crypto_chacha_pyx), 0, 0, 1, 0}, {&__pyx_n_s_nescient_crypto_tools, __pyx_k_nescient_crypto_tools, sizeof(__pyx_k_nescient_crypto_tools), 0, 0, 1, 1}, {&__pyx_n_s_new, __pyx_k_new, sizeof(__pyx_k_new), 0, 0, 1, 1}, {&__pyx_kp_s_no_default___reduce___due_to_non, __pyx_k_no_default___reduce___due_to_non, sizeof(__pyx_k_no_default___reduce___due_to_non), 0, 0, 1, 0}, {&__pyx_n_s_nonce, __pyx_k_nonce, sizeof(__pyx_k_nonce), 0, 0, 1, 1}, {&__pyx_n_s_nonce_w, __pyx_k_nonce_w, sizeof(__pyx_k_nonce_w), 0, 0, 1, 1}, {&__pyx_n_s_obj, __pyx_k_obj, sizeof(__pyx_k_obj), 0, 0, 1, 1}, {&__pyx_n_s_pack, __pyx_k_pack, sizeof(__pyx_k_pack), 0, 0, 1, 1}, {&__pyx_n_s_pickle, __pyx_k_pickle, sizeof(__pyx_k_pickle), 0, 0, 1, 1}, {&__pyx_n_s_prepare, __pyx_k_prepare, sizeof(__pyx_k_prepare), 0, 0, 1, 1}, {&__pyx_n_s_pyx_PickleError, __pyx_k_pyx_PickleError, sizeof(__pyx_k_pyx_PickleError), 0, 0, 1, 1}, {&__pyx_n_s_pyx_checksum, __pyx_k_pyx_checksum, sizeof(__pyx_k_pyx_checksum), 0, 0, 1, 1}, {&__pyx_n_s_pyx_getbuffer, __pyx_k_pyx_getbuffer, sizeof(__pyx_k_pyx_getbuffer), 0, 0, 1, 1}, {&__pyx_n_s_pyx_result, __pyx_k_pyx_result, sizeof(__pyx_k_pyx_result), 0, 0, 1, 1}, {&__pyx_n_s_pyx_state, __pyx_k_pyx_state, sizeof(__pyx_k_pyx_state), 0, 0, 1, 1}, {&__pyx_n_s_pyx_type, __pyx_k_pyx_type, sizeof(__pyx_k_pyx_type), 0, 0, 1, 1}, {&__pyx_n_s_pyx_unpickle_Enum, __pyx_k_pyx_unpickle_Enum, sizeof(__pyx_k_pyx_unpickle_Enum), 0, 0, 1, 1}, {&__pyx_n_s_pyx_vtable, __pyx_k_pyx_vtable, sizeof(__pyx_k_pyx_vtable), 0, 0, 1, 1}, {&__pyx_n_s_qualname, __pyx_k_qualname, sizeof(__pyx_k_qualname), 0, 0, 1, 1}, {&__pyx_n_s_randbits, __pyx_k_randbits, sizeof(__pyx_k_randbits), 0, 0, 1, 1}, {&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1}, {&__pyx_n_s_reduce, __pyx_k_reduce, sizeof(__pyx_k_reduce), 0, 0, 1, 1}, {&__pyx_n_s_reduce_cython, __pyx_k_reduce_cython, sizeof(__pyx_k_reduce_cython), 0, 0, 1, 1}, {&__pyx_n_s_reduce_ex, __pyx_k_reduce_ex, sizeof(__pyx_k_reduce_ex), 0, 0, 1, 1}, {&__pyx_n_s_self, __pyx_k_self, sizeof(__pyx_k_self), 0, 0, 1, 1}, {&__pyx_n_s_setstate, __pyx_k_setstate, sizeof(__pyx_k_setstate), 0, 0, 1, 1}, {&__pyx_n_s_setstate_cython, __pyx_k_setstate_cython, sizeof(__pyx_k_setstate_cython), 0, 0, 1, 1}, 
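/* String-tab note: each __Pyx_StringTabEntry packs a C literal with
 * the slot that will hold its interned Python object, plus flags that
 * tell __Pyx_InitStrings how to decode it. A best-effort reading of
 * one identifier entry (the exact struct layout is defined earlier in
 * this file):
 *
 *     {&__pyx_n_s_sha,        PyObject** slot filled in at init time
 *      __pyx_k_sha,           C literal "sha"
 *      sizeof(__pyx_k_sha),   byte length of the literal
 *      0,                     encoding (0 means default)
 *      0, 1, 1}               is_unicode=0, is_str=1, intern=1
 */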
{&__pyx_n_s_sha, __pyx_k_sha, sizeof(__pyx_k_sha), 0, 0, 1, 1}, {&__pyx_n_s_shape, __pyx_k_shape, sizeof(__pyx_k_shape), 0, 0, 1, 1}, {&__pyx_n_s_size, __pyx_k_size, sizeof(__pyx_k_size), 0, 0, 1, 1}, {&__pyx_n_s_sleep, __pyx_k_sleep, sizeof(__pyx_k_sleep), 0, 0, 1, 1}, {&__pyx_n_s_start, __pyx_k_start, sizeof(__pyx_k_start), 0, 0, 1, 1}, {&__pyx_n_s_step, __pyx_k_step, sizeof(__pyx_k_step), 0, 0, 1, 1}, {&__pyx_n_s_stm, __pyx_k_stm, sizeof(__pyx_k_stm), 0, 0, 1, 1}, {&__pyx_n_s_stop, __pyx_k_stop, sizeof(__pyx_k_stop), 0, 0, 1, 1}, {&__pyx_kp_s_strided_and_direct, __pyx_k_strided_and_direct, sizeof(__pyx_k_strided_and_direct), 0, 0, 1, 0}, {&__pyx_kp_s_strided_and_direct_or_indirect, __pyx_k_strided_and_direct_or_indirect, sizeof(__pyx_k_strided_and_direct_or_indirect), 0, 0, 1, 0}, {&__pyx_kp_s_strided_and_indirect, __pyx_k_strided_and_indirect, sizeof(__pyx_k_strided_and_indirect), 0, 0, 1, 0}, {&__pyx_kp_s_stringsource, __pyx_k_stringsource, sizeof(__pyx_k_stringsource), 0, 0, 1, 0}, {&__pyx_n_s_struct, __pyx_k_struct, sizeof(__pyx_k_struct), 0, 0, 1, 1}, {&__pyx_n_s_sys, __pyx_k_sys, sizeof(__pyx_k_sys), 0, 0, 1, 1}, {&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1}, {&__pyx_n_s_time, __pyx_k_time, sizeof(__pyx_k_time), 0, 0, 1, 1}, {&__pyx_n_s_to_bytes, __pyx_k_to_bytes, sizeof(__pyx_k_to_bytes), 0, 0, 1, 1}, {&__pyx_kp_s_unable_to_allocate_array_data, __pyx_k_unable_to_allocate_array_data, sizeof(__pyx_k_unable_to_allocate_array_data), 0, 0, 1, 0}, {&__pyx_kp_s_unable_to_allocate_shape_and_str, __pyx_k_unable_to_allocate_shape_and_str, sizeof(__pyx_k_unable_to_allocate_shape_and_str), 0, 0, 1, 0}, {&__pyx_n_s_unpack, __pyx_k_unpack, sizeof(__pyx_k_unpack), 0, 0, 1, 1}, {&__pyx_n_s_update, __pyx_k_update, sizeof(__pyx_k_update), 0, 0, 1, 1}, {&__pyx_n_s_view, __pyx_k_view, sizeof(__pyx_k_view), 0, 0, 1, 1}, {0, 0, 0, 0, 0, 0, 0} }; static int __Pyx_InitCachedBuiltins(void) { __pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) __PYX_ERR(0, 28, __pyx_L1_error) __pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) __PYX_ERR(1, 132, __pyx_L1_error) __pyx_builtin_MemoryError = __Pyx_GetBuiltinName(__pyx_n_s_MemoryError); if (!__pyx_builtin_MemoryError) __PYX_ERR(1, 147, __pyx_L1_error) __pyx_builtin_enumerate = __Pyx_GetBuiltinName(__pyx_n_s_enumerate); if (!__pyx_builtin_enumerate) __PYX_ERR(1, 150, __pyx_L1_error) __pyx_builtin_TypeError = __Pyx_GetBuiltinName(__pyx_n_s_TypeError); if (!__pyx_builtin_TypeError) __PYX_ERR(1, 2, __pyx_L1_error) __pyx_builtin_Ellipsis = __Pyx_GetBuiltinName(__pyx_n_s_Ellipsis); if (!__pyx_builtin_Ellipsis) __PYX_ERR(1, 399, __pyx_L1_error) __pyx_builtin_id = __Pyx_GetBuiltinName(__pyx_n_s_id); if (!__pyx_builtin_id) __PYX_ERR(1, 608, __pyx_L1_error) __pyx_builtin_IndexError = __Pyx_GetBuiltinName(__pyx_n_s_IndexError); if (!__pyx_builtin_IndexError) __PYX_ERR(1, 827, __pyx_L1_error) return 0; __pyx_L1_error:; return -1; } static int __Pyx_InitCachedConstants(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0); /* "nescient/crypto/chacha.pyx":183 * def __init__(self, key): * assert len(key) == 32 * self.key = key[:] # <<<<<<<<<<<<<< * # Since this is a stream cipher encryption is the same as decryption * self.chacha_decrypt = self.chacha_encrypt */ __pyx_slice_ = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice_)) __PYX_ERR(0, 183, __pyx_L1_error) __Pyx_GOTREF(__pyx_slice_); 
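/* Cached-constant pattern: constants such as the full slice for
 * `self.key = key[:]` above and the argument tuples below are built
 * once at import time and reused on every call, so the hot path never
 * rebuilds them. Roughly (sketch only; to_bytes stands in for the
 * bound method looked up at runtime, and is not a name from this
 * module):
 *
 *     PyObject *args = __pyx_tuple__2;                    (12, 'little')
 *     PyObject *b = PyObject_Call(to_bytes, args, NULL);  nonce.to_bytes(12, 'little')
 */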
__Pyx_GIVEREF(__pyx_slice_); /* "nescient/crypto/chacha.pyx":192 * cdef uint32_t * key_w = bytes_to_words(self.key, 32) * # Convert the nonce into an array of 32-bit words * cdef uint32_t * nonce_w = bytes_to_words(nonce.to_bytes(12, 'little'), 12) # <<<<<<<<<<<<<< * # Create a typed memoryview of data and pass its address * cdef uint8_t[:] view = data */ __pyx_tuple__2 = PyTuple_Pack(2, __pyx_int_12, __pyx_n_s_little); if (unlikely(!__pyx_tuple__2)) __PYX_ERR(0, 192, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__2); __Pyx_GIVEREF(__pyx_tuple__2); /* "nescient/crypto/chacha.pyx":220 * # Generate a random 96-bit nonce if unspecified * if nonce is None: * nonce = randbits(96) # <<<<<<<<<<<<<< * # Determine the number of threads to use based on CPU count * cdef int n_threads = cpu_count() */ __pyx_tuple__3 = PyTuple_Pack(1, __pyx_int_96); if (unlikely(!__pyx_tuple__3)) __PYX_ERR(0, 220, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__3); __Pyx_GIVEREF(__pyx_tuple__3); /* "nescient/crypto/chacha.pyx":238 * cdef uint32_t * key_w = bytes_to_words(self.key, 32) * # Convert the nonce into an array of 32-bit words * cdef uint32_t * nonce_w = bytes_to_words(nonce.to_bytes(12, 'little'), 12) # <<<<<<<<<<<<<< * cdef uint64_t l = len(data) * cdef uint8_t * buffer = data */ __pyx_tuple__4 = PyTuple_Pack(2, __pyx_int_12, __pyx_n_s_little); if (unlikely(!__pyx_tuple__4)) __PYX_ERR(0, 238, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__4); __Pyx_GIVEREF(__pyx_tuple__4); /* "View.MemoryView":132 * * if not self.ndim: * raise ValueError("Empty shape tuple for cython.array") # <<<<<<<<<<<<<< * * if itemsize <= 0: */ __pyx_tuple__5 = PyTuple_Pack(1, __pyx_kp_s_Empty_shape_tuple_for_cython_arr); if (unlikely(!__pyx_tuple__5)) __PYX_ERR(1, 132, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__5); __Pyx_GIVEREF(__pyx_tuple__5); /* "View.MemoryView":135 * * if itemsize <= 0: * raise ValueError("itemsize <= 0 for cython.array") # <<<<<<<<<<<<<< * * if not isinstance(format, bytes): */ __pyx_tuple__6 = PyTuple_Pack(1, __pyx_kp_s_itemsize_0_for_cython_array); if (unlikely(!__pyx_tuple__6)) __PYX_ERR(1, 135, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__6); __Pyx_GIVEREF(__pyx_tuple__6); /* "View.MemoryView":138 * * if not isinstance(format, bytes): * format = format.encode('ASCII') # <<<<<<<<<<<<<< * self._format = format # keep a reference to the byte string * self.format = self._format */ __pyx_tuple__7 = PyTuple_Pack(1, __pyx_n_s_ASCII); if (unlikely(!__pyx_tuple__7)) __PYX_ERR(1, 138, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__7); __Pyx_GIVEREF(__pyx_tuple__7); /* "View.MemoryView":147 * * if not self._shape: * raise MemoryError("unable to allocate shape and strides.") # <<<<<<<<<<<<<< * * */ __pyx_tuple__8 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_shape_and_str); if (unlikely(!__pyx_tuple__8)) __PYX_ERR(1, 147, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__8); __Pyx_GIVEREF(__pyx_tuple__8); /* "View.MemoryView":175 * self.data = <char *>malloc(self.len) * if not self.data: * raise MemoryError("unable to allocate array data.") # <<<<<<<<<<<<<< * * if self.dtype_is_object: */ __pyx_tuple__9 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_array_data); if (unlikely(!__pyx_tuple__9)) __PYX_ERR(1, 175, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__9); __Pyx_GIVEREF(__pyx_tuple__9); /* "View.MemoryView":191 * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") # <<<<<<<<<<<<<< * info.buf = self.data * info.len = self.len */ __pyx_tuple__10 = 
PyTuple_Pack(1, __pyx_kp_s_Can_only_create_a_buffer_that_is); if (unlikely(!__pyx_tuple__10)) __PYX_ERR(1, 191, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__10); __Pyx_GIVEREF(__pyx_tuple__10); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_tuple__11 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__11)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__11); __Pyx_GIVEREF(__pyx_tuple__11); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_tuple__12 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__12)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__12); __Pyx_GIVEREF(__pyx_tuple__12); /* "View.MemoryView":413 * def __setitem__(memoryview self, object index, object value): * if self.view.readonly: * raise TypeError("Cannot assign to read-only memoryview") # <<<<<<<<<<<<<< * * have_slices, index = _unellipsify(index, self.view.ndim) */ __pyx_tuple__13 = PyTuple_Pack(1, __pyx_kp_s_Cannot_assign_to_read_only_memor); if (unlikely(!__pyx_tuple__13)) __PYX_ERR(1, 413, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__13); __Pyx_GIVEREF(__pyx_tuple__13); /* "View.MemoryView":490 * result = struct.unpack(self.view.format, bytesitem) * except struct.error: * raise ValueError("Unable to convert item to object") # <<<<<<<<<<<<<< * else: * if len(self.view.format) == 1: */ __pyx_tuple__14 = PyTuple_Pack(1, __pyx_kp_s_Unable_to_convert_item_to_object); if (unlikely(!__pyx_tuple__14)) __PYX_ERR(1, 490, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__14); __Pyx_GIVEREF(__pyx_tuple__14); /* "View.MemoryView":515 * def __getbuffer__(self, Py_buffer *info, int flags): * if flags & PyBUF_WRITABLE and self.view.readonly: * raise ValueError("Cannot create writable memory view from read-only memoryview") # <<<<<<<<<<<<<< * * if flags & PyBUF_STRIDES: */ __pyx_tuple__15 = PyTuple_Pack(1, __pyx_kp_s_Cannot_create_writable_memory_vi); if (unlikely(!__pyx_tuple__15)) __PYX_ERR(1, 515, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__15); __Pyx_GIVEREF(__pyx_tuple__15); /* "View.MemoryView":565 * if self.view.strides == NULL: * * raise ValueError("Buffer view does not expose strides") # <<<<<<<<<<<<<< * * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) */ __pyx_tuple__16 = PyTuple_Pack(1, __pyx_kp_s_Buffer_view_does_not_expose_stri); if (unlikely(!__pyx_tuple__16)) __PYX_ERR(1, 565, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__16); __Pyx_GIVEREF(__pyx_tuple__16); /* "View.MemoryView":572 * def suboffsets(self): * if self.view.suboffsets == NULL: * return (-1,) * self.view.ndim # <<<<<<<<<<<<<< * * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) */ __pyx_tuple__17 = PyTuple_New(1); if (unlikely(!__pyx_tuple__17)) __PYX_ERR(1, 572, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__17); __Pyx_INCREF(__pyx_int_neg_1); __Pyx_GIVEREF(__pyx_int_neg_1); PyTuple_SET_ITEM(__pyx_tuple__17, 0, __pyx_int_neg_1); __Pyx_GIVEREF(__pyx_tuple__17); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def 
__setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_tuple__18 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__18)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__18); __Pyx_GIVEREF(__pyx_tuple__18); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_tuple__19 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__19)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__19); __Pyx_GIVEREF(__pyx_tuple__19); /* "View.MemoryView":677 * if item is Ellipsis: * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) # <<<<<<<<<<<<<< * seen_ellipsis = True * else: */ __pyx_slice__20 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__20)) __PYX_ERR(1, 677, __pyx_L1_error) __Pyx_GOTREF(__pyx_slice__20); __Pyx_GIVEREF(__pyx_slice__20); /* "View.MemoryView":680 * seen_ellipsis = True * else: * result.append(slice(None)) # <<<<<<<<<<<<<< * have_slices = True * else: */ __pyx_slice__21 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__21)) __PYX_ERR(1, 680, __pyx_L1_error) __Pyx_GOTREF(__pyx_slice__21); __Pyx_GIVEREF(__pyx_slice__21); /* "View.MemoryView":691 * nslices = ndim - len(result) * if nslices: * result.extend([slice(None)] * nslices) # <<<<<<<<<<<<<< * * return have_slices or nslices, tuple(result) */ __pyx_slice__22 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__22)) __PYX_ERR(1, 691, __pyx_L1_error) __Pyx_GOTREF(__pyx_slice__22); __Pyx_GIVEREF(__pyx_slice__22); /* "View.MemoryView":698 * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: * raise ValueError("Indirect dimensions not supported") # <<<<<<<<<<<<<< * * */ __pyx_tuple__23 = PyTuple_Pack(1, __pyx_kp_s_Indirect_dimensions_not_supporte); if (unlikely(!__pyx_tuple__23)) __PYX_ERR(1, 698, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__23); __Pyx_GIVEREF(__pyx_tuple__23); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_tuple__24 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__24)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__24); __Pyx_GIVEREF(__pyx_tuple__24); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_tuple__25 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__25)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__25); __Pyx_GIVEREF(__pyx_tuple__25); /* "nescient/crypto/chacha.pyx":181 * auth = ['sha'] * * def __init__(self, key): # <<<<<<<<<<<<<< * assert len(key) == 32 * self.key = key[:] */ __pyx_tuple__26 = PyTuple_Pack(2, __pyx_n_s_self, __pyx_n_s_key); if (unlikely(!__pyx_tuple__26)) __PYX_ERR(0, 181, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__26); __Pyx_GIVEREF(__pyx_tuple__26); __pyx_codeobj__27 = (PyObject*)__Pyx_PyCode_New(2, 0, 2, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, 
__pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__26, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_nescient_crypto_chacha_pyx, __pyx_n_s_init, 181, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__27)) __PYX_ERR(0, 181, __pyx_L1_error) /* "nescient/crypto/chacha.pyx":188 * * # The function that actually performs ChaCha encryption/decryption * def _chacha_task(self, data, nonce, count=1): # <<<<<<<<<<<<<< * # Convert key from bytes to little-endian words * cdef uint32_t * key_w = bytes_to_words(self.key, 32) */ __pyx_tuple__28 = PyTuple_Pack(7, __pyx_n_s_self, __pyx_n_s_data, __pyx_n_s_nonce, __pyx_n_s_count, __pyx_n_s_key_w, __pyx_n_s_nonce_w, __pyx_n_s_view); if (unlikely(!__pyx_tuple__28)) __PYX_ERR(0, 188, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__28); __Pyx_GIVEREF(__pyx_tuple__28); __pyx_codeobj__29 = (PyObject*)__Pyx_PyCode_New(4, 0, 7, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__28, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_nescient_crypto_chacha_pyx, __pyx_n_s_chacha_task, 188, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__29)) __PYX_ERR(0, 188, __pyx_L1_error) __pyx_tuple__30 = PyTuple_Pack(1, ((PyObject *)__pyx_int_1)); if (unlikely(!__pyx_tuple__30)) __PYX_ERR(0, 188, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__30); __Pyx_GIVEREF(__pyx_tuple__30); /* "nescient/crypto/chacha.pyx":199 * PyMem_Free(nonce_w) * * def chacha_encrypt(self, data, nonce=None, count=1, force_single_thread=False): # <<<<<<<<<<<<<< * """ Encrypt (or decrypt) in-memory data using ChaCha20. * */ __pyx_tuple__31 = PyTuple_Pack(14, __pyx_n_s_self, __pyx_n_s_data, __pyx_n_s_nonce, __pyx_n_s_count, __pyx_n_s_force_single_thread, __pyx_n_s_n_threads, __pyx_n_s_chunk_size, __pyx_n_s_blocks_per_chunk, __pyx_n_s_i, __pyx_n_s_ccount, __pyx_n_s_key_w, __pyx_n_s_nonce_w, __pyx_n_s_l, __pyx_n_s_buffer); if (unlikely(!__pyx_tuple__31)) __PYX_ERR(0, 199, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__31); __Pyx_GIVEREF(__pyx_tuple__31); __pyx_codeobj__32 = (PyObject*)__Pyx_PyCode_New(5, 0, 14, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__31, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_nescient_crypto_chacha_pyx, __pyx_n_s_chacha_encrypt, 199, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__32)) __PYX_ERR(0, 199, __pyx_L1_error) __pyx_tuple__33 = PyTuple_Pack(3, ((PyObject *)Py_None), ((PyObject *)__pyx_int_1), ((PyObject *)Py_False)); if (unlikely(!__pyx_tuple__33)) __PYX_ERR(0, 199, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__33); __Pyx_GIVEREF(__pyx_tuple__33); /* "View.MemoryView":285 * return self.name * * cdef generic = Enum("<strided and direct or indirect>") # <<<<<<<<<<<<<< * cdef strided = Enum("<strided and direct>") # default * cdef indirect = Enum("<strided and indirect>") */ __pyx_tuple__34 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct_or_indirect); if (unlikely(!__pyx_tuple__34)) __PYX_ERR(1, 285, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__34); __Pyx_GIVEREF(__pyx_tuple__34); /* "View.MemoryView":286 * * cdef generic = Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided and direct>") # default # <<<<<<<<<<<<<< * cdef indirect = Enum("<strided and indirect>") * */ __pyx_tuple__35 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct); if (unlikely(!__pyx_tuple__35)) __PYX_ERR(1, 286, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__35); __Pyx_GIVEREF(__pyx_tuple__35); /* "View.MemoryView":287 * cdef generic = Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided 
and direct>") # default * cdef indirect = Enum("<strided and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_tuple__36 = PyTuple_Pack(1, __pyx_kp_s_strided_and_indirect); if (unlikely(!__pyx_tuple__36)) __PYX_ERR(1, 287, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__36); __Pyx_GIVEREF(__pyx_tuple__36); /* "View.MemoryView":290 * * * cdef contiguous = Enum("<contiguous and direct>") # <<<<<<<<<<<<<< * cdef indirect_contiguous = Enum("<contiguous and indirect>") * */ __pyx_tuple__37 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_direct); if (unlikely(!__pyx_tuple__37)) __PYX_ERR(1, 290, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__37); __Pyx_GIVEREF(__pyx_tuple__37); /* "View.MemoryView":291 * * cdef contiguous = Enum("<contiguous and direct>") * cdef indirect_contiguous = Enum("<contiguous and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_tuple__38 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_indirect); if (unlikely(!__pyx_tuple__38)) __PYX_ERR(1, 291, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__38); __Pyx_GIVEREF(__pyx_tuple__38); /* "(tree fragment)":1 * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< * if __pyx_checksum != 0xb068931: * from pickle import PickleError as __pyx_PickleError */ __pyx_tuple__39 = PyTuple_Pack(5, __pyx_n_s_pyx_type, __pyx_n_s_pyx_checksum, __pyx_n_s_pyx_state, __pyx_n_s_pyx_PickleError, __pyx_n_s_pyx_result); if (unlikely(!__pyx_tuple__39)) __PYX_ERR(1, 1, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__39); __Pyx_GIVEREF(__pyx_tuple__39); __pyx_codeobj__40 = (PyObject*)__Pyx_PyCode_New(3, 0, 5, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__39, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_stringsource, __pyx_n_s_pyx_unpickle_Enum, 1, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__40)) __PYX_ERR(1, 1, __pyx_L1_error) __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; __Pyx_RefNannyFinishContext(); return -1; } static int __Pyx_InitGlobals(void) { /* InitThreads.init */ #ifdef WITH_THREAD PyEval_InitThreads(); #endif if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 1, __pyx_L1_error) if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 1, __pyx_L1_error); __pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_1 = PyInt_FromLong(1); if (unlikely(!__pyx_int_1)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_12 = PyInt_FromLong(12); if (unlikely(!__pyx_int_12)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_96 = PyInt_FromLong(96); if (unlikely(!__pyx_int_96)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_184977713 = PyInt_FromLong(184977713L); if (unlikely(!__pyx_int_184977713)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_neg_1 = PyInt_FromLong(-1); if (unlikely(!__pyx_int_neg_1)) __PYX_ERR(0, 1, __pyx_L1_error) return 0; __pyx_L1_error:; return -1; } static int __Pyx_modinit_global_init_code(void); /*proto*/ static int __Pyx_modinit_variable_export_code(void); /*proto*/ static int __Pyx_modinit_function_export_code(void); /*proto*/ static int __Pyx_modinit_type_init_code(void); /*proto*/ static int __Pyx_modinit_type_import_code(void); /*proto*/ static int __Pyx_modinit_variable_import_code(void); /*proto*/ static int __Pyx_modinit_function_import_code(void); /*proto*/ static int __Pyx_modinit_global_init_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_global_init_code", 0); /*--- Global init code ---*/ generic = Py_None; Py_INCREF(Py_None); strided = Py_None; Py_INCREF(Py_None); indirect = Py_None; Py_INCREF(Py_None); 
contiguous = Py_None; Py_INCREF(Py_None); indirect_contiguous = Py_None; Py_INCREF(Py_None); __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_variable_export_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_variable_export_code", 0); /*--- Variable export code ---*/ __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_function_export_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_function_export_code", 0); /*--- Function export code ---*/ if (__Pyx_ExportFunction("_chacha_task", (void (*)(void))__pyx_f_8nescient_6crypto_6chacha__chacha_task, "void (uint32_t *, uint8_t *, uint32_t *, uint32_t, uint64_t)") < 0) __PYX_ERR(0, 1, __pyx_L1_error) __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; __Pyx_RefNannyFinishContext(); return -1; } static int __Pyx_modinit_type_init_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_type_init_code", 0); /*--- Type init code ---*/ __pyx_vtabptr_array = &__pyx_vtable_array; __pyx_vtable_array.get_memview = (PyObject *(*)(struct __pyx_array_obj *))__pyx_array_get_memview; if (PyType_Ready(&__pyx_type___pyx_array) < 0) __PYX_ERR(1, 104, __pyx_L1_error) __pyx_type___pyx_array.tp_print = 0; if (__Pyx_SetVtable(__pyx_type___pyx_array.tp_dict, __pyx_vtabptr_array) < 0) __PYX_ERR(1, 104, __pyx_L1_error) if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_array) < 0) __PYX_ERR(1, 104, __pyx_L1_error) __pyx_array_type = &__pyx_type___pyx_array; if (PyType_Ready(&__pyx_type___pyx_MemviewEnum) < 0) __PYX_ERR(1, 278, __pyx_L1_error) __pyx_type___pyx_MemviewEnum.tp_print = 0; if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_MemviewEnum.tp_dictoffset && __pyx_type___pyx_MemviewEnum.tp_getattro == PyObject_GenericGetAttr)) { __pyx_type___pyx_MemviewEnum.tp_getattro = __Pyx_PyObject_GenericGetAttr; } if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_MemviewEnum) < 0) __PYX_ERR(1, 278, __pyx_L1_error) __pyx_MemviewEnum_type = &__pyx_type___pyx_MemviewEnum; __pyx_vtabptr_memoryview = &__pyx_vtable_memoryview; __pyx_vtable_memoryview.get_item_pointer = (char *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_get_item_pointer; __pyx_vtable_memoryview.is_slice = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_is_slice; __pyx_vtable_memoryview.setitem_slice_assignment = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_slice_assignment; __pyx_vtable_memoryview.setitem_slice_assign_scalar = (PyObject *(*)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_setitem_slice_assign_scalar; __pyx_vtable_memoryview.setitem_indexed = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_indexed; __pyx_vtable_memoryview.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryview_convert_item_to_object; __pyx_vtable_memoryview.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryview_assign_item_from_object; if (PyType_Ready(&__pyx_type___pyx_memoryview) < 0) __PYX_ERR(1, 329, __pyx_L1_error) __pyx_type___pyx_memoryview.tp_print = 0; if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_memoryview.tp_dictoffset && __pyx_type___pyx_memoryview.tp_getattro == PyObject_GenericGetAttr)) { __pyx_type___pyx_memoryview.tp_getattro = 
__Pyx_PyObject_GenericGetAttr; } if (__Pyx_SetVtable(__pyx_type___pyx_memoryview.tp_dict, __pyx_vtabptr_memoryview) < 0) __PYX_ERR(1, 329, __pyx_L1_error) if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_memoryview) < 0) __PYX_ERR(1, 329, __pyx_L1_error) __pyx_memoryview_type = &__pyx_type___pyx_memoryview; __pyx_vtabptr__memoryviewslice = &__pyx_vtable__memoryviewslice; __pyx_vtable__memoryviewslice.__pyx_base = *__pyx_vtabptr_memoryview; __pyx_vtable__memoryviewslice.__pyx_base.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryviewslice_convert_item_to_object; __pyx_vtable__memoryviewslice.__pyx_base.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryviewslice_assign_item_from_object; __pyx_type___pyx_memoryviewslice.tp_base = __pyx_memoryview_type; if (PyType_Ready(&__pyx_type___pyx_memoryviewslice) < 0) __PYX_ERR(1, 960, __pyx_L1_error) __pyx_type___pyx_memoryviewslice.tp_print = 0; if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_memoryviewslice.tp_dictoffset && __pyx_type___pyx_memoryviewslice.tp_getattro == PyObject_GenericGetAttr)) { __pyx_type___pyx_memoryviewslice.tp_getattro = __Pyx_PyObject_GenericGetAttr; } if (__Pyx_SetVtable(__pyx_type___pyx_memoryviewslice.tp_dict, __pyx_vtabptr__memoryviewslice) < 0) __PYX_ERR(1, 960, __pyx_L1_error) if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_memoryviewslice) < 0) __PYX_ERR(1, 960, __pyx_L1_error) __pyx_memoryviewslice_type = &__pyx_type___pyx_memoryviewslice; __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; __Pyx_RefNannyFinishContext(); return -1; } static int __Pyx_modinit_type_import_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_type_import_code", 0); /*--- Type import code ---*/ __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_variable_import_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_variable_import_code", 0); /*--- Variable import code ---*/ __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_function_import_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_function_import_code", 0); /*--- Function import code ---*/ __Pyx_RefNannyFinishContext(); return 0; } #if PY_MAJOR_VERSION < 3 #ifdef CYTHON_NO_PYINIT_EXPORT #define __Pyx_PyMODINIT_FUNC void #else #define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC #endif #else #ifdef CYTHON_NO_PYINIT_EXPORT #define __Pyx_PyMODINIT_FUNC PyObject * #else #define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC #endif #endif #ifndef CYTHON_SMALL_CODE #if defined(__clang__) #define CYTHON_SMALL_CODE #elif defined(__GNUC__) && (!(defined(__cplusplus)) || (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ > 4))) #define CYTHON_SMALL_CODE __attribute__((cold)) #else #define CYTHON_SMALL_CODE #endif #endif #if PY_MAJOR_VERSION < 3 __Pyx_PyMODINIT_FUNC initchacha(void) CYTHON_SMALL_CODE; /*proto*/ __Pyx_PyMODINIT_FUNC initchacha(void) #else __Pyx_PyMODINIT_FUNC PyInit_chacha(void) CYTHON_SMALL_CODE; /*proto*/ __Pyx_PyMODINIT_FUNC PyInit_chacha(void) #if CYTHON_PEP489_MULTI_PHASE_INIT { return PyModuleDef_Init(&__pyx_moduledef); } static int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name) { PyObject *value = PyObject_GetAttrString(spec, from_name); int result = 0; if (likely(value)) { result = PyDict_SetItemString(moddict, to_name, value); Py_DECREF(value); } else if 
(PyErr_ExceptionMatches(PyExc_AttributeError)) { PyErr_Clear(); } else { result = -1; } return result; } static PyObject* __pyx_pymod_create(PyObject *spec, CYTHON_UNUSED PyModuleDef *def) { PyObject *module = NULL, *moddict, *modname; if (__pyx_m) return __Pyx_NewRef(__pyx_m); modname = PyObject_GetAttrString(spec, "name"); if (unlikely(!modname)) goto bad; module = PyModule_NewObject(modname); Py_DECREF(modname); if (unlikely(!module)) goto bad; moddict = PyModule_GetDict(module); if (unlikely(!moddict)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__") < 0)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__") < 0)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__") < 0)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "submodule_search_locations", "__path__") < 0)) goto bad; return module; bad: Py_XDECREF(module); return NULL; } static int __pyx_pymod_exec_chacha(PyObject *__pyx_pyinit_module) #endif #endif { PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; int __pyx_t_3; static PyThread_type_lock __pyx_t_4[8]; __Pyx_RefNannyDeclarations #if CYTHON_PEP489_MULTI_PHASE_INIT if (__pyx_m && __pyx_m == __pyx_pyinit_module) return 0; #elif PY_MAJOR_VERSION >= 3 if (__pyx_m) return __Pyx_NewRef(__pyx_m); #endif #if CYTHON_REFNANNY __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); if (!__Pyx_RefNanny) { PyErr_Clear(); __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); if (!__Pyx_RefNanny) Py_FatalError("failed to import 'refnanny' module"); } #endif __Pyx_RefNannySetupContext("__Pyx_PyMODINIT_FUNC PyInit_chacha(void)", 0); if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error) #ifdef __Pyx_CyFunction_USED if (__pyx_CyFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_FusedFunction_USED if (__pyx_FusedFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_Coroutine_USED if (__pyx_Coroutine_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_Generator_USED if (__pyx_Generator_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_AsyncGen_USED if (__pyx_AsyncGen_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_StopAsyncIteration_USED if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif /*--- Library function declarations ---*/ /*--- Threads initialization code ---*/ #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS #ifdef WITH_THREAD /* Python build with threading support? 
*/ PyEval_InitThreads(); #endif #endif /*--- Module creation code ---*/ #if CYTHON_PEP489_MULTI_PHASE_INIT __pyx_m = __pyx_pyinit_module; Py_INCREF(__pyx_m); #else #if PY_MAJOR_VERSION < 3 __pyx_m = Py_InitModule4("chacha", __pyx_methods, __pyx_k_Classes_and_Cython_functions_fo, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m); #else __pyx_m = PyModule_Create(&__pyx_moduledef); #endif if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error) #endif __pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error) Py_INCREF(__pyx_d); __pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_cython_runtime = PyImport_AddModule((char *) "cython_runtime"); if (unlikely(!__pyx_cython_runtime)) __PYX_ERR(0, 1, __pyx_L1_error) #if CYTHON_COMPILING_IN_PYPY Py_INCREF(__pyx_b); #endif if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error); /*--- Initialize various global constants etc. ---*/ if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT) if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif if (__pyx_module_is_main_nescient__crypto__chacha) { if (PyObject_SetAttrString(__pyx_m, "__name__", __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error) } #if PY_MAJOR_VERSION >= 3 { PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error) if (!PyDict_GetItemString(modules, "nescient.crypto.chacha")) { if (unlikely(PyDict_SetItemString(modules, "nescient.crypto.chacha", __pyx_m) < 0)) __PYX_ERR(0, 1, __pyx_L1_error) } } #endif /*--- Builtin init code ---*/ if (__Pyx_InitCachedBuiltins() < 0) __PYX_ERR(0, 1, __pyx_L1_error) /*--- Constants init code ---*/ if (__Pyx_InitCachedConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error) /*--- Global type/function init code ---*/ (void)__Pyx_modinit_global_init_code(); (void)__Pyx_modinit_variable_export_code(); if (unlikely(__Pyx_modinit_function_export_code() != 0)) goto __pyx_L1_error; if (unlikely(__Pyx_modinit_type_init_code() != 0)) goto __pyx_L1_error; (void)__Pyx_modinit_type_import_code(); (void)__Pyx_modinit_variable_import_code(); (void)__Pyx_modinit_function_import_code(); /*--- Execution code ---*/ #if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED) if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif /* "nescient/crypto/chacha.pyx":12 * from cpython.mem cimport PyMem_Malloc, PyMem_Free * * import sys # <<<<<<<<<<<<<< * from time import sleep * from multiprocessing import cpu_count, Process, active_children */ __pyx_t_1 = __Pyx_Import(__pyx_n_s_sys, 0, -1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_sys, __pyx_t_1) < 0) __PYX_ERR(0, 12, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nescient/crypto/chacha.pyx":13 * * import sys * from time import sleep # <<<<<<<<<<<<<< * from multiprocessing import cpu_count, Process, active_children * from multiprocessing.sharedctypes import RawArray */ __pyx_t_1 = PyList_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_n_s_sleep); __Pyx_GIVEREF(__pyx_n_s_sleep); PyList_SET_ITEM(__pyx_t_1, 0, __pyx_n_s_sleep); __pyx_t_2 = __Pyx_Import(__pyx_n_s_time, __pyx_t_1, -1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13, __pyx_L1_error) 
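/* From-import shape: each `from X import a, b` above and below builds
 * a fromlist, imports the module, pulls the attributes off it, and
 * binds them in the module dict. Approximately, in plain C-API terms
 * (a sketch, not the exact __Pyx_* helpers used here; name_time and
 * moddict are placeholders):
 *
 *     PyObject *fromlist = Py_BuildValue("[s]", "sleep");
 *     PyObject *mod  = PyImport_ImportModuleLevelObject(
 *                          name_time, NULL, NULL, fromlist, 0);
 *     PyObject *attr = PyObject_GetAttrString(mod, "sleep");
 *     PyDict_SetItemString(moddict, "sleep", attr);
 */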
__Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_ImportFrom(__pyx_t_2, __pyx_n_s_sleep); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_sleep, __pyx_t_1) < 0) __PYX_ERR(0, 13, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "nescient/crypto/chacha.pyx":14 * import sys * from time import sleep * from multiprocessing import cpu_count, Process, active_children # <<<<<<<<<<<<<< * from multiprocessing.sharedctypes import RawArray * from ctypes import c_ubyte */ __pyx_t_2 = PyList_New(3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 14, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_n_s_cpu_count); __Pyx_GIVEREF(__pyx_n_s_cpu_count); PyList_SET_ITEM(__pyx_t_2, 0, __pyx_n_s_cpu_count); __Pyx_INCREF(__pyx_n_s_Process); __Pyx_GIVEREF(__pyx_n_s_Process); PyList_SET_ITEM(__pyx_t_2, 1, __pyx_n_s_Process); __Pyx_INCREF(__pyx_n_s_active_children); __Pyx_GIVEREF(__pyx_n_s_active_children); PyList_SET_ITEM(__pyx_t_2, 2, __pyx_n_s_active_children); __pyx_t_1 = __Pyx_Import(__pyx_n_s_multiprocessing, __pyx_t_2, -1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_ImportFrom(__pyx_t_1, __pyx_n_s_cpu_count); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 14, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_cpu_count, __pyx_t_2) < 0) __PYX_ERR(0, 14, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_ImportFrom(__pyx_t_1, __pyx_n_s_Process); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 14, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_Process, __pyx_t_2) < 0) __PYX_ERR(0, 14, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_ImportFrom(__pyx_t_1, __pyx_n_s_active_children); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 14, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_active_children, __pyx_t_2) < 0) __PYX_ERR(0, 14, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nescient/crypto/chacha.pyx":15 * from time import sleep * from multiprocessing import cpu_count, Process, active_children * from multiprocessing.sharedctypes import RawArray # <<<<<<<<<<<<<< * from ctypes import c_ubyte * from cython.parallel import prange */ __pyx_t_1 = PyList_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_n_s_RawArray); __Pyx_GIVEREF(__pyx_n_s_RawArray); PyList_SET_ITEM(__pyx_t_1, 0, __pyx_n_s_RawArray); __pyx_t_2 = __Pyx_Import(__pyx_n_s_multiprocessing_sharedctypes, __pyx_t_1, -1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_ImportFrom(__pyx_t_2, __pyx_n_s_RawArray); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_RawArray, __pyx_t_1) < 0) __PYX_ERR(0, 15, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "nescient/crypto/chacha.pyx":16 * from multiprocessing import cpu_count, Process, active_children * from multiprocessing.sharedctypes import RawArray * from ctypes import c_ubyte # <<<<<<<<<<<<<< * from cython.parallel import prange * from libc.stdlib cimport malloc, free */ __pyx_t_2 = PyList_New(1); if 
(unlikely(!__pyx_t_2)) __PYX_ERR(0, 16, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_n_s_c_ubyte); __Pyx_GIVEREF(__pyx_n_s_c_ubyte); PyList_SET_ITEM(__pyx_t_2, 0, __pyx_n_s_c_ubyte); __pyx_t_1 = __Pyx_Import(__pyx_n_s_ctypes, __pyx_t_2, -1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_ImportFrom(__pyx_t_1, __pyx_n_s_c_ubyte); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 16, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_c_ubyte, __pyx_t_2) < 0) __PYX_ERR(0, 16, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nescient/crypto/chacha.pyx":21 * from libc.stdint cimport uint32_t, uint8_t, uint64_t * * from nescient.crypto.tools import randbits # <<<<<<<<<<<<<< * * */ __pyx_t_1 = PyList_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 21, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_n_s_randbits); __Pyx_GIVEREF(__pyx_n_s_randbits); PyList_SET_ITEM(__pyx_t_1, 0, __pyx_n_s_randbits); __pyx_t_2 = __Pyx_Import(__pyx_n_s_nescient_crypto_tools, __pyx_t_1, -1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 21, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_ImportFrom(__pyx_t_2, __pyx_n_s_randbits); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 21, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_randbits, __pyx_t_1) < 0) __PYX_ERR(0, 21, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "nescient/crypto/chacha.pyx":45 * * # Determine the system's endianness * cdef bint big_endian = sys.byteorder == 'big' # <<<<<<<<<<<<<< * * */ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_sys); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 45, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_byteorder); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 45, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyObject_RichCompare(__pyx_t_1, __pyx_n_s_big, Py_EQ); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 45, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely((__pyx_t_3 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 45, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_v_8nescient_6crypto_6chacha_big_endian = __pyx_t_3; /* "nescient/crypto/chacha.pyx":168 * * #foo * class ChaChaCrypter: # <<<<<<<<<<<<<< * """ A Crypter object used for encrypting or decrypting arbitrary data using the ChaCha stream cipher. * */ __pyx_t_2 = __Pyx_Py3MetaclassPrepare((PyObject *) NULL, __pyx_empty_tuple, __pyx_n_s_ChaChaCrypter, __pyx_n_s_ChaChaCrypter, (PyObject *) NULL, __pyx_n_s_nescient_crypto_chacha, __pyx_kp_s_A_Crypter_object_used_for_encry); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 168, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); /* "nescient/crypto/chacha.pyx":178 * key (bytes): The 256 bit key used to encrypt/decrypt data. 
* """ * modes = ['stm'] # Represents stream cipher mode # <<<<<<<<<<<<<< * auth = ['sha'] * */ __pyx_t_1 = PyList_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 178, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_n_s_stm); __Pyx_GIVEREF(__pyx_n_s_stm); PyList_SET_ITEM(__pyx_t_1, 0, __pyx_n_s_stm); if (__Pyx_SetNameInClass(__pyx_t_2, __pyx_n_s_modes, __pyx_t_1) < 0) __PYX_ERR(0, 178, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nescient/crypto/chacha.pyx":179 * """ * modes = ['stm'] # Represents stream cipher mode * auth = ['sha'] # <<<<<<<<<<<<<< * * def __init__(self, key): */ __pyx_t_1 = PyList_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 179, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_n_s_sha); __Pyx_GIVEREF(__pyx_n_s_sha); PyList_SET_ITEM(__pyx_t_1, 0, __pyx_n_s_sha); if (__Pyx_SetNameInClass(__pyx_t_2, __pyx_n_s_auth, __pyx_t_1) < 0) __PYX_ERR(0, 179, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nescient/crypto/chacha.pyx":181 * auth = ['sha'] * * def __init__(self, key): # <<<<<<<<<<<<<< * assert len(key) == 32 * self.key = key[:] */ __pyx_t_1 = __Pyx_CyFunction_NewEx(&__pyx_mdef_8nescient_6crypto_6chacha_13ChaChaCrypter_1__init__, 0, __pyx_n_s_ChaChaCrypter___init, NULL, __pyx_n_s_nescient_crypto_chacha, __pyx_d, ((PyObject *)__pyx_codeobj__27)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 181, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (__Pyx_SetNameInClass(__pyx_t_2, __pyx_n_s_init, __pyx_t_1) < 0) __PYX_ERR(0, 181, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nescient/crypto/chacha.pyx":188 * * # The function that actually performs ChaCha encryption/decryption * def _chacha_task(self, data, nonce, count=1): # <<<<<<<<<<<<<< * # Convert key from bytes to little-endian words * cdef uint32_t * key_w = bytes_to_words(self.key, 32) */ __pyx_t_1 = __Pyx_CyFunction_NewEx(&__pyx_mdef_8nescient_6crypto_6chacha_13ChaChaCrypter_3_chacha_task, 0, __pyx_n_s_ChaChaCrypter__chacha_task, NULL, __pyx_n_s_nescient_crypto_chacha, __pyx_d, ((PyObject *)__pyx_codeobj__29)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 188, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_1, __pyx_tuple__30); if (__Pyx_SetNameInClass(__pyx_t_2, __pyx_n_s_chacha_task, __pyx_t_1) < 0) __PYX_ERR(0, 188, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nescient/crypto/chacha.pyx":199 * PyMem_Free(nonce_w) * * def chacha_encrypt(self, data, nonce=None, count=1, force_single_thread=False): # <<<<<<<<<<<<<< * """ Encrypt (or decrypt) in-memory data using ChaCha20. * */ __pyx_t_1 = __Pyx_CyFunction_NewEx(&__pyx_mdef_8nescient_6crypto_6chacha_13ChaChaCrypter_5chacha_encrypt, 0, __pyx_n_s_ChaChaCrypter_chacha_encrypt, NULL, __pyx_n_s_nescient_crypto_chacha, __pyx_d, ((PyObject *)__pyx_codeobj__32)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 199, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_1, __pyx_tuple__33); if (__Pyx_SetNameInClass(__pyx_t_2, __pyx_n_s_chacha_encrypt, __pyx_t_1) < 0) __PYX_ERR(0, 199, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nescient/crypto/chacha.pyx":168 * * #foo * class ChaChaCrypter: # <<<<<<<<<<<<<< * """ A Crypter object used for encrypting or decrypting arbitrary data using the ChaCha stream cipher. 
* */ __pyx_t_1 = __Pyx_Py3ClassCreate(((PyObject*)&__Pyx_DefaultClassType), __pyx_n_s_ChaChaCrypter, __pyx_empty_tuple, __pyx_t_2, NULL, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 168, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_ChaChaCrypter, __pyx_t_1) < 0) __PYX_ERR(0, 168, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "nescient/crypto/chacha.pyx":1 * # Nescient: A Python program for packing/unpacking encrypted, salted, and authenticated file containers. # <<<<<<<<<<<<<< * # Copyright (C) 2018 Ariel Antonitis. Licensed under the MIT license. * # */ __pyx_t_2 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_2) < 0) __PYX_ERR(0, 1, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":208 * info.obj = self * * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< * * def __dealloc__(array self): */ __pyx_t_2 = __pyx_capsule_create(((void *)(&__pyx_array_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 208, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem((PyObject *)__pyx_array_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_2) < 0) __PYX_ERR(1, 208, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; PyType_Modified(__pyx_array_type); /* "View.MemoryView":285 * return self.name * * cdef generic = Enum("<strided and direct or indirect>") # <<<<<<<<<<<<<< * cdef strided = Enum("<strided and direct>") # default * cdef indirect = Enum("<strided and indirect>") */ __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__34, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 285, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_XGOTREF(generic); __Pyx_DECREF_SET(generic, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":286 * * cdef generic = Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided and direct>") # default # <<<<<<<<<<<<<< * cdef indirect = Enum("<strided and indirect>") * */ __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__35, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 286, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_XGOTREF(strided); __Pyx_DECREF_SET(strided, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":287 * cdef generic = Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided and direct>") # default * cdef indirect = Enum("<strided and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__36, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 287, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_XGOTREF(indirect); __Pyx_DECREF_SET(indirect, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":290 * * * cdef contiguous = Enum("<contiguous and direct>") # <<<<<<<<<<<<<< * cdef indirect_contiguous = Enum("<contiguous and indirect>") * */ __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__37, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 290, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_XGOTREF(contiguous); __Pyx_DECREF_SET(contiguous, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":291 * * cdef contiguous = Enum("<contiguous and direct>") * cdef 
indirect_contiguous = Enum("<contiguous and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__38, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 291, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_XGOTREF(indirect_contiguous); __Pyx_DECREF_SET(indirect_contiguous, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":315 * * DEF THREAD_LOCKS_PREALLOCATED = 8 * cdef int __pyx_memoryview_thread_locks_used = 0 # <<<<<<<<<<<<<< * cdef PyThread_type_lock[THREAD_LOCKS_PREALLOCATED] __pyx_memoryview_thread_locks = [ * PyThread_allocate_lock(), */ __pyx_memoryview_thread_locks_used = 0; /* "View.MemoryView":316 * DEF THREAD_LOCKS_PREALLOCATED = 8 * cdef int __pyx_memoryview_thread_locks_used = 0 * cdef PyThread_type_lock[THREAD_LOCKS_PREALLOCATED] __pyx_memoryview_thread_locks = [ # <<<<<<<<<<<<<< * PyThread_allocate_lock(), * PyThread_allocate_lock(), */ __pyx_t_4[0] = PyThread_allocate_lock(); __pyx_t_4[1] = PyThread_allocate_lock(); __pyx_t_4[2] = PyThread_allocate_lock(); __pyx_t_4[3] = PyThread_allocate_lock(); __pyx_t_4[4] = PyThread_allocate_lock(); __pyx_t_4[5] = PyThread_allocate_lock(); __pyx_t_4[6] = PyThread_allocate_lock(); __pyx_t_4[7] = PyThread_allocate_lock(); memcpy(&(__pyx_memoryview_thread_locks[0]), __pyx_t_4, sizeof(__pyx_memoryview_thread_locks[0]) * (8)); /* "View.MemoryView":544 * info.obj = self * * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< * * */ __pyx_t_2 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 544, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem((PyObject *)__pyx_memoryview_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_2) < 0) __PYX_ERR(1, 544, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; PyType_Modified(__pyx_memoryview_type); /* "View.MemoryView":990 * return self.from_object * * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< * * */ __pyx_t_2 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 990, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem((PyObject *)__pyx_memoryviewslice_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_2) < 0) __PYX_ERR(1, 990, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; PyType_Modified(__pyx_memoryviewslice_type); /* "(tree fragment)":1 * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< * if __pyx_checksum != 0xb068931: * from pickle import PickleError as __pyx_PickleError */ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_15View_dot_MemoryView_1__pyx_unpickle_Enum, NULL, __pyx_n_s_View_MemoryView); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_pyx_unpickle_Enum, __pyx_t_2) < 0) __PYX_ERR(1, 1, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "(tree fragment)":9 * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<< * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): */ /*--- Wrapped vars code ---*/ goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); if 
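/* [editor's note -- illustrative sketch, not Cython output] The
   THREAD_LOCKS_PREALLOCATED block above amortizes lock creation: eight locks
   are allocated once at module init and handed to memoryviews on demand, with
   PyThread_allocate_lock() used only once the pool is exhausted. A minimal
   pool of the same shape (names are hypothetical); guarded with #if 0 so it
   is never compiled: */
#if 0
#include <Python.h>
#include "pythread.h"

#define POOL_SIZE 8
static PyThread_type_lock lock_pool[POOL_SIZE];  /* filled at module init */
static int locks_used = 0;

static PyThread_type_lock take_lock(void) {
    /* Hand out a preallocated lock while any remain, else allocate fresh. */
    if (locks_used < POOL_SIZE)
        return lock_pool[locks_used++];
    return PyThread_allocate_lock();
}
#endif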
(__pyx_m) { if (__pyx_d) { __Pyx_AddTraceback("init nescient.crypto.chacha", 0, __pyx_lineno, __pyx_filename); } Py_DECREF(__pyx_m); __pyx_m = 0; } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_ImportError, "init nescient.crypto.chacha"); } __pyx_L0:; __Pyx_RefNannyFinishContext(); #if CYTHON_PEP489_MULTI_PHASE_INIT return (__pyx_m != NULL) ? 0 : -1; #elif PY_MAJOR_VERSION >= 3 return __pyx_m; #else return; #endif } /* --- Runtime support code --- */ /* Refnanny */ #if CYTHON_REFNANNY static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { PyObject *m = NULL, *p = NULL; void *r = NULL; m = PyImport_ImportModule((char *)modname); if (!m) goto end; p = PyObject_GetAttrString(m, (char *)"RefNannyAPI"); if (!p) goto end; r = PyLong_AsVoidPtr(p); end: Py_XDECREF(p); Py_XDECREF(m); return (__Pyx_RefNannyAPIStruct *)r; } #endif /* PyObjectGetAttrStr */ #if CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) { PyTypeObject* tp = Py_TYPE(obj); if (likely(tp->tp_getattro)) return tp->tp_getattro(obj, attr_name); #if PY_MAJOR_VERSION < 3 if (likely(tp->tp_getattr)) return tp->tp_getattr(obj, PyString_AS_STRING(attr_name)); #endif return PyObject_GetAttr(obj, attr_name); } #endif /* GetBuiltinName */ static PyObject *__Pyx_GetBuiltinName(PyObject *name) { PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name); if (unlikely(!result)) { PyErr_Format(PyExc_NameError, #if PY_MAJOR_VERSION >= 3 "name '%U' is not defined", name); #else "name '%.200s' is not defined", PyString_AS_STRING(name)); #endif } return result; } /* RaiseArgTupleInvalid */ static void __Pyx_RaiseArgtupleInvalid( const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found) { Py_ssize_t num_expected; const char *more_or_less; if (num_found < num_min) { num_expected = num_min; more_or_less = "at least"; } else { num_expected = num_max; more_or_less = "at most"; } if (exact) { more_or_less = "exactly"; } PyErr_Format(PyExc_TypeError, "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)", func_name, more_or_less, num_expected, (num_expected == 1) ? 
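/* [editor's note -- illustrative sketch, not Cython output]
   __Pyx_PyObject_GetAttrStr above bypasses PyObject_GetAttr's generic
   machinery by calling the type's tp_getattro slot directly when it is set
   (and, on Python 2, the legacy char*-based tp_getattr). The core of the
   idea, under a hypothetical name: */
#if 0
static PyObject *getattr_via_slot(PyObject *obj, PyObject *name) {
    PyTypeObject *tp = Py_TYPE(obj);
    if (tp->tp_getattro)                 /* modern attribute hook */
        return tp->tp_getattro(obj, name);
    return PyObject_GetAttr(obj, name);  /* generic fallback */
}
#endif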
"" : "s", num_found); } /* RaiseDoubleKeywords */ static void __Pyx_RaiseDoubleKeywordsError( const char* func_name, PyObject* kw_name) { PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION >= 3 "%s() got multiple values for keyword argument '%U'", func_name, kw_name); #else "%s() got multiple values for keyword argument '%s'", func_name, PyString_AsString(kw_name)); #endif } /* ParseKeywords */ static int __Pyx_ParseOptionalKeywords( PyObject *kwds, PyObject **argnames[], PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, const char* function_name) { PyObject *key = 0, *value = 0; Py_ssize_t pos = 0; PyObject*** name; PyObject*** first_kw_arg = argnames + num_pos_args; while (PyDict_Next(kwds, &pos, &key, &value)) { name = first_kw_arg; while (*name && (**name != key)) name++; if (*name) { values[name-argnames] = value; continue; } name = first_kw_arg; #if PY_MAJOR_VERSION < 3 if (likely(PyString_CheckExact(key)) || likely(PyString_Check(key))) { while (*name) { if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key)) && _PyString_Eq(**name, key)) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { if ((**argname == key) || ( (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key)) && _PyString_Eq(**argname, key))) { goto arg_passed_twice; } argname++; } } } else #endif if (likely(PyUnicode_Check(key))) { while (*name) { int cmp = (**name == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (PyUnicode_GET_SIZE(**name) != PyUnicode_GET_SIZE(key)) ? 1 : #endif PyUnicode_Compare(**name, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { int cmp = (**argname == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (PyUnicode_GET_SIZE(**argname) != PyUnicode_GET_SIZE(key)) ? 
1 : #endif PyUnicode_Compare(**argname, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) goto arg_passed_twice; argname++; } } } else goto invalid_keyword_type; if (kwds2) { if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; } else { goto invalid_keyword; } } return 0; arg_passed_twice: __Pyx_RaiseDoubleKeywordsError(function_name, key); goto bad; invalid_keyword_type: PyErr_Format(PyExc_TypeError, "%.200s() keywords must be strings", function_name); goto bad; invalid_keyword: PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION < 3 "%.200s() got an unexpected keyword argument '%.200s'", function_name, PyString_AsString(key)); #else "%s() got an unexpected keyword argument '%U'", function_name, key); #endif bad: return -1; } /* SliceObject */ static CYTHON_INLINE PyObject* __Pyx_PyObject_GetSlice(PyObject* obj, Py_ssize_t cstart, Py_ssize_t cstop, PyObject** _py_start, PyObject** _py_stop, PyObject** _py_slice, int has_cstart, int has_cstop, CYTHON_UNUSED int wraparound) { #if CYTHON_USE_TYPE_SLOTS PyMappingMethods* mp; #if PY_MAJOR_VERSION < 3 PySequenceMethods* ms = Py_TYPE(obj)->tp_as_sequence; if (likely(ms && ms->sq_slice)) { if (!has_cstart) { if (_py_start && (*_py_start != Py_None)) { cstart = __Pyx_PyIndex_AsSsize_t(*_py_start); if ((cstart == (Py_ssize_t)-1) && PyErr_Occurred()) goto bad; } else cstart = 0; } if (!has_cstop) { if (_py_stop && (*_py_stop != Py_None)) { cstop = __Pyx_PyIndex_AsSsize_t(*_py_stop); if ((cstop == (Py_ssize_t)-1) && PyErr_Occurred()) goto bad; } else cstop = PY_SSIZE_T_MAX; } if (wraparound && unlikely((cstart < 0) | (cstop < 0)) && likely(ms->sq_length)) { Py_ssize_t l = ms->sq_length(obj); if (likely(l >= 0)) { if (cstop < 0) { cstop += l; if (cstop < 0) cstop = 0; } if (cstart < 0) { cstart += l; if (cstart < 0) cstart = 0; } } else { if (!PyErr_ExceptionMatches(PyExc_OverflowError)) goto bad; PyErr_Clear(); } } return ms->sq_slice(obj, cstart, cstop); } #endif mp = Py_TYPE(obj)->tp_as_mapping; if (likely(mp && mp->mp_subscript)) #endif { PyObject* result; PyObject *py_slice, *py_start, *py_stop; if (_py_slice) { py_slice = *_py_slice; } else { PyObject* owned_start = NULL; PyObject* owned_stop = NULL; if (_py_start) { py_start = *_py_start; } else { if (has_cstart) { owned_start = py_start = PyInt_FromSsize_t(cstart); if (unlikely(!py_start)) goto bad; } else py_start = Py_None; } if (_py_stop) { py_stop = *_py_stop; } else { if (has_cstop) { owned_stop = py_stop = PyInt_FromSsize_t(cstop); if (unlikely(!py_stop)) { Py_XDECREF(owned_start); goto bad; } } else py_stop = Py_None; } py_slice = PySlice_New(py_start, py_stop, Py_None); Py_XDECREF(owned_start); Py_XDECREF(owned_stop); if (unlikely(!py_slice)) goto bad; } #if CYTHON_USE_TYPE_SLOTS result = mp->mp_subscript(obj, py_slice); #else result = PyObject_GetItem(obj, py_slice); #endif if (!_py_slice) { Py_DECREF(py_slice); } return result; } PyErr_Format(PyExc_TypeError, "'%.200s' object is unsliceable", Py_TYPE(obj)->tp_name); bad: return NULL; } /* PyObjectSetAttrStr */ #if CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE int __Pyx_PyObject_SetAttrStr(PyObject* obj, PyObject* attr_name, PyObject* value) { PyTypeObject* tp = Py_TYPE(obj); if (likely(tp->tp_setattro)) return tp->tp_setattro(obj, attr_name, value); #if PY_MAJOR_VERSION < 3 if (likely(tp->tp_setattr)) return tp->tp_setattr(obj, PyString_AS_STRING(attr_name), value); #endif return PyObject_SetAttr(obj, attr_name, value); } #endif /* PyObjectCall */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* 
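/* [editor's note -- illustrative sketch, not Cython output] The SliceObject
   helper above prefers the Python 2-only sq_slice slot and otherwise
   materializes a real slice object and subscripts with it. The portable
   branch reduces to roughly this (hypothetical name): */
#if 0
static PyObject *get_slice(PyObject *obj, Py_ssize_t start, Py_ssize_t stop) {
    PyObject *lo = PyLong_FromSsize_t(start);
    PyObject *hi = PyLong_FromSsize_t(stop);
    PyObject *sl = NULL, *res = NULL;
    if (lo && hi)
        sl = PySlice_New(lo, hi, Py_None);  /* does not steal lo/hi */
    Py_XDECREF(lo);
    Py_XDECREF(hi);
    if (!sl) return NULL;
    res = PyObject_GetItem(obj, sl);        /* dispatches to mp_subscript */
    Py_DECREF(sl);
    return res;
}
#endif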
__Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) { PyObject *result; ternaryfunc call = func->ob_type->tp_call; if (unlikely(!call)) return PyObject_Call(func, arg, kw); if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) return NULL; result = (*call)(func, arg, kw); Py_LeaveRecursiveCall(); if (unlikely(!result) && unlikely(!PyErr_Occurred())) { PyErr_SetString( PyExc_SystemError, "NULL result without error in PyObject_Call"); } return result; } #endif /* BufferIndexError */ static void __Pyx_RaiseBufferIndexError(int axis) { PyErr_Format(PyExc_IndexError, "Out of bounds on buffer access (axis %d)", axis); } /* MemviewSliceInit */ static int __Pyx_init_memviewslice(struct __pyx_memoryview_obj *memview, int ndim, __Pyx_memviewslice *memviewslice, int memview_is_new_reference) { __Pyx_RefNannyDeclarations int i, retval=-1; Py_buffer *buf = &memview->view; __Pyx_RefNannySetupContext("init_memviewslice", 0); if (!buf) { PyErr_SetString(PyExc_ValueError, "buf is NULL."); goto fail; } else if (memviewslice->memview || memviewslice->data) { PyErr_SetString(PyExc_ValueError, "memviewslice is already initialized!"); goto fail; } if (buf->strides) { for (i = 0; i < ndim; i++) { memviewslice->strides[i] = buf->strides[i]; } } else { Py_ssize_t stride = buf->itemsize; for (i = ndim - 1; i >= 0; i--) { memviewslice->strides[i] = stride; stride *= buf->shape[i]; } } for (i = 0; i < ndim; i++) { memviewslice->shape[i] = buf->shape[i]; if (buf->suboffsets) { memviewslice->suboffsets[i] = buf->suboffsets[i]; } else { memviewslice->suboffsets[i] = -1; } } memviewslice->memview = memview; memviewslice->data = (char *)buf->buf; if (__pyx_add_acquisition_count(memview) == 0 && !memview_is_new_reference) { Py_INCREF(memview); } retval = 0; goto no_fail; fail: memviewslice->memview = 0; memviewslice->data = 0; retval = -1; no_fail: __Pyx_RefNannyFinishContext(); return retval; } #ifndef Py_NO_RETURN #define Py_NO_RETURN #endif static void __pyx_fatalerror(const char *fmt, ...) 
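/* [editor's note -- illustrative sketch, not Cython output]
   __Pyx_init_memviewslice above synthesizes strides when the buffer exporter
   leaves Py_buffer.strides NULL, which under the buffer protocol means
   C-contiguous data: the innermost axis strides by itemsize, and each outer
   axis by the product of the shapes inside it. That loop in isolation: */
#if 0
static void fill_c_contig_strides(Py_ssize_t *strides, const Py_ssize_t *shape,
                                  int ndim, Py_ssize_t itemsize) {
    Py_ssize_t stride = itemsize;
    int i;
    for (i = ndim - 1; i >= 0; i--) {   /* walk axes from innermost outward */
        strides[i] = stride;
        stride *= shape[i];
    }
}
#endif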
Py_NO_RETURN { va_list vargs; char msg[200]; #ifdef HAVE_STDARG_PROTOTYPES va_start(vargs, fmt); #else va_start(vargs); #endif vsnprintf(msg, 200, fmt, vargs); va_end(vargs); Py_FatalError(msg); } static CYTHON_INLINE int __pyx_add_acquisition_count_locked(__pyx_atomic_int *acquisition_count, PyThread_type_lock lock) { int result; PyThread_acquire_lock(lock, 1); result = (*acquisition_count)++; PyThread_release_lock(lock); return result; } static CYTHON_INLINE int __pyx_sub_acquisition_count_locked(__pyx_atomic_int *acquisition_count, PyThread_type_lock lock) { int result; PyThread_acquire_lock(lock, 1); result = (*acquisition_count)--; PyThread_release_lock(lock); return result; } static CYTHON_INLINE void __Pyx_INC_MEMVIEW(__Pyx_memviewslice *memslice, int have_gil, int lineno) { int first_time; struct __pyx_memoryview_obj *memview = memslice->memview; if (!memview || (PyObject *) memview == Py_None) return; if (__pyx_get_slice_count(memview) < 0) __pyx_fatalerror("Acquisition count is %d (line %d)", __pyx_get_slice_count(memview), lineno); first_time = __pyx_add_acquisition_count(memview) == 0; if (first_time) { if (have_gil) { Py_INCREF((PyObject *) memview); } else { PyGILState_STATE _gilstate = PyGILState_Ensure(); Py_INCREF((PyObject *) memview); PyGILState_Release(_gilstate); } } } static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *memslice, int have_gil, int lineno) { int last_time; struct __pyx_memoryview_obj *memview = memslice->memview; if (!memview ) { return; } else if ((PyObject *) memview == Py_None) { memslice->memview = NULL; return; } if (__pyx_get_slice_count(memview) <= 0) __pyx_fatalerror("Acquisition count is %d (line %d)", __pyx_get_slice_count(memview), lineno); last_time = __pyx_sub_acquisition_count(memview) == 1; memslice->data = NULL; if (last_time) { if (have_gil) { Py_CLEAR(memslice->memview); } else { PyGILState_STATE _gilstate = PyGILState_Ensure(); Py_CLEAR(memslice->memview); PyGILState_Release(_gilstate); } } else { memslice->memview = NULL; } } /* GetModuleGlobalName */ static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name) { PyObject *result; #if !CYTHON_AVOID_BORROWED_REFS #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 result = _PyDict_GetItem_KnownHash(__pyx_d, name, ((PyASCIIObject *) name)->hash); if (likely(result)) { Py_INCREF(result); } else if (unlikely(PyErr_Occurred())) { result = NULL; } else { #else result = PyDict_GetItem(__pyx_d, name); if (likely(result)) { Py_INCREF(result); } else { #endif #else result = PyObject_GetItem(__pyx_d, name); if (!result) { PyErr_Clear(); #endif result = __Pyx_GetBuiltinName(name); } return result; } /* PyCFunctionFastCall */ #if CYTHON_FAST_PYCCALL static CYTHON_INLINE PyObject * __Pyx_PyCFunction_FastCall(PyObject *func_obj, PyObject **args, Py_ssize_t nargs) { PyCFunctionObject *func = (PyCFunctionObject*)func_obj; PyCFunction meth = PyCFunction_GET_FUNCTION(func); PyObject *self = PyCFunction_GET_SELF(func); int flags = PyCFunction_GET_FLAGS(func); assert(PyCFunction_Check(func)); assert(METH_FASTCALL == (flags & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS))); assert(nargs >= 0); assert(nargs == 0 || args != NULL); /* _PyCFunction_FastCallDict() must not be called with an exception set, because it may clear it (directly or indirectly) and so the caller loses its exception */ assert(!PyErr_Occurred()); if ((PY_VERSION_HEX < 0x030700A0) || unlikely(flags & METH_KEYWORDS)) { return (*((__Pyx_PyCFunctionFastWithKeywords)meth)) (self, args, nargs, 
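/* [editor's note -- illustrative sketch, not Cython output] __Pyx_INC_MEMVIEW
   and __Pyx_XDEC_MEMVIEW above touch the Python reference count only on the
   0 -> 1 and 1 -> 0 transitions of the slice acquisition counter, and take
   the GIL solely for that rare case. Schematically (struct owner and
   add_acquisition are hypothetical stand-ins): */
#if 0
struct owner { PyObject *obj; };
extern int add_acquisition(struct owner *o);   /* returns previous count */

static void slice_incref(struct owner *o, int have_gil) {
    if (add_acquisition(o) == 0) {             /* first slice: pin the owner */
        if (have_gil) {
            Py_INCREF(o->obj);
        } else {
            PyGILState_STATE st = PyGILState_Ensure();
            Py_INCREF(o->obj);
            PyGILState_Release(st);
        }
    }
}
#endif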
NULL); } else { return (*((__Pyx_PyCFunctionFast)meth)) (self, args, nargs); } } #endif /* PyFunctionFastCall */ #if CYTHON_FAST_PYCALL #include "frameobject.h" static PyObject* __Pyx_PyFunction_FastCallNoKw(PyCodeObject *co, PyObject **args, Py_ssize_t na, PyObject *globals) { PyFrameObject *f; PyThreadState *tstate = __Pyx_PyThreadState_Current; PyObject **fastlocals; Py_ssize_t i; PyObject *result; assert(globals != NULL); /* XXX Perhaps we should create a specialized PyFrame_New() that doesn't take locals, but does take builtins without sanity checking them. */ assert(tstate != NULL); f = PyFrame_New(tstate, co, globals, NULL); if (f == NULL) { return NULL; } fastlocals = f->f_localsplus; for (i = 0; i < na; i++) { Py_INCREF(*args); fastlocals[i] = *args++; } result = PyEval_EvalFrameEx(f,0); ++tstate->recursion_depth; Py_DECREF(f); --tstate->recursion_depth; return result; } #if 1 || PY_VERSION_HEX < 0x030600B1 static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, int nargs, PyObject *kwargs) { PyCodeObject *co = (PyCodeObject *)PyFunction_GET_CODE(func); PyObject *globals = PyFunction_GET_GLOBALS(func); PyObject *argdefs = PyFunction_GET_DEFAULTS(func); PyObject *closure; #if PY_MAJOR_VERSION >= 3 PyObject *kwdefs; #endif PyObject *kwtuple, **k; PyObject **d; Py_ssize_t nd; Py_ssize_t nk; PyObject *result; assert(kwargs == NULL || PyDict_Check(kwargs)); nk = kwargs ? PyDict_Size(kwargs) : 0; if (Py_EnterRecursiveCall((char*)" while calling a Python object")) { return NULL; } if ( #if PY_MAJOR_VERSION >= 3 co->co_kwonlyargcount == 0 && #endif likely(kwargs == NULL || nk == 0) && co->co_flags == (CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE)) { if (argdefs == NULL && co->co_argcount == nargs) { result = __Pyx_PyFunction_FastCallNoKw(co, args, nargs, globals); goto done; } else if (nargs == 0 && argdefs != NULL && co->co_argcount == Py_SIZE(argdefs)) { /* function called with no arguments, but all parameters have a default value: use default values as arguments .*/ args = &PyTuple_GET_ITEM(argdefs, 0); result =__Pyx_PyFunction_FastCallNoKw(co, args, Py_SIZE(argdefs), globals); goto done; } } if (kwargs != NULL) { Py_ssize_t pos, i; kwtuple = PyTuple_New(2 * nk); if (kwtuple == NULL) { result = NULL; goto done; } k = &PyTuple_GET_ITEM(kwtuple, 0); pos = i = 0; while (PyDict_Next(kwargs, &pos, &k[i], &k[i+1])) { Py_INCREF(k[i]); Py_INCREF(k[i+1]); i += 2; } nk = i / 2; } else { kwtuple = NULL; k = NULL; } closure = PyFunction_GET_CLOSURE(func); #if PY_MAJOR_VERSION >= 3 kwdefs = PyFunction_GET_KW_DEFAULTS(func); #endif if (argdefs != NULL) { d = &PyTuple_GET_ITEM(argdefs, 0); nd = Py_SIZE(argdefs); } else { d = NULL; nd = 0; } #if PY_MAJOR_VERSION >= 3 result = PyEval_EvalCodeEx((PyObject*)co, globals, (PyObject *)NULL, args, nargs, k, (int)nk, d, (int)nd, kwdefs, closure); #else result = PyEval_EvalCodeEx(co, globals, (PyObject *)NULL, args, nargs, k, (int)nk, d, (int)nd, closure); #endif Py_XDECREF(kwtuple); done: Py_LeaveRecursiveCall(); return result; } #endif #endif /* PyObjectCallMethO */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) { PyObject *self, *result; PyCFunction cfunc; cfunc = PyCFunction_GET_FUNCTION(func); self = PyCFunction_GET_SELF(func); if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) return NULL; result = cfunc(self, arg); Py_LeaveRecursiveCall(); if (unlikely(!result) && unlikely(!PyErr_Occurred())) { PyErr_SetString( PyExc_SystemError, "NULL 
result without error in PyObject_Call"); } return result; } #endif /* PyObjectCallOneArg */ #if CYTHON_COMPILING_IN_CPYTHON static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) { PyObject *result; PyObject *args = PyTuple_New(1); if (unlikely(!args)) return NULL; Py_INCREF(arg); PyTuple_SET_ITEM(args, 0, arg); result = __Pyx_PyObject_Call(func, args, NULL); Py_DECREF(args); return result; } static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { #if CYTHON_FAST_PYCALL if (PyFunction_Check(func)) { return __Pyx_PyFunction_FastCall(func, &arg, 1); } #endif if (likely(PyCFunction_Check(func))) { if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) { return __Pyx_PyObject_CallMethO(func, arg); #if CYTHON_FAST_PYCCALL } else if (PyCFunction_GET_FLAGS(func) & METH_FASTCALL) { return __Pyx_PyCFunction_FastCall(func, &arg, 1); #endif } } return __Pyx__PyObject_CallOneArg(func, arg); } #else static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { PyObject *result; PyObject *args = PyTuple_Pack(1, arg); if (unlikely(!args)) return NULL; result = __Pyx_PyObject_Call(func, args, NULL); Py_DECREF(args); return result; } #endif /* PyObjectCallNoArg */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func) { #if CYTHON_FAST_PYCALL if (PyFunction_Check(func)) { return __Pyx_PyFunction_FastCall(func, NULL, 0); } #endif #ifdef __Pyx_CyFunction_USED if (likely(PyCFunction_Check(func) || __Pyx_TypeCheck(func, __pyx_CyFunctionType))) { #else if (likely(PyCFunction_Check(func))) { #endif if (likely(PyCFunction_GET_FLAGS(func) & METH_NOARGS)) { return __Pyx_PyObject_CallMethO(func, NULL); } } return __Pyx_PyObject_Call(func, __pyx_empty_tuple, NULL); } #endif /* None */ static CYTHON_INLINE Py_ssize_t __Pyx_div_Py_ssize_t(Py_ssize_t a, Py_ssize_t b) { Py_ssize_t q = a / b; Py_ssize_t r = a - q*b; q -= ((r != 0) & ((r ^ b) < 0)); return q; } /* None */ static CYTHON_INLINE long __Pyx_div_long(long a, long b) { long q = a / b; long r = a - q*b; q -= ((r != 0) & ((r ^ b) < 0)); return q; } /* ArgTypeTest */ static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact) { if (unlikely(!type)) { PyErr_SetString(PyExc_SystemError, "Missing type object"); return 0; } else if (exact) { #if PY_MAJOR_VERSION == 2 if ((type == &PyBaseString_Type) && likely(__Pyx_PyBaseString_CheckExact(obj))) return 1; #endif } else { if (likely(__Pyx_TypeCheck(obj, type))) return 1; } PyErr_Format(PyExc_TypeError, "Argument '%.200s' has incorrect type (expected %.200s, got %.200s)", name, type->tp_name, Py_TYPE(obj)->tp_name); return 0; } /* PyErrFetchRestore */ #if CYTHON_FAST_THREAD_STATE static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; tmp_type = tstate->curexc_type; tmp_value = tstate->curexc_value; tmp_tb = tstate->curexc_traceback; tstate->curexc_type = type; tstate->curexc_value = value; tstate->curexc_traceback = tb; Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); } static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { *type = tstate->curexc_type; *value = tstate->curexc_value; *tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; } #endif /* RaiseException */ #if PY_MAJOR_VERSION < 3 static void 
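/* [editor's note -- illustrative sketch, not Cython output]
   __Pyx_PyObject_CallOneArg above dispatches on the callee: plain Python
   functions take the frame-reusing fast call, METH_O / METH_FASTCALL C
   functions are invoked directly, and everything else gets a freshly packed
   1-tuple. The generic fallback alone looks like this (hypothetical name): */
#if 0
static PyObject *call_one_arg(PyObject *func, PyObject *arg) {
    PyObject *res;
    PyObject *args = PyTuple_Pack(1, arg);   /* new tuple, +1 ref on arg */
    if (!args) return NULL;
    res = PyObject_Call(func, args, NULL);
    Py_DECREF(args);
    return res;
}
#endif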
__Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, CYTHON_UNUSED PyObject *cause) { __Pyx_PyThreadState_declare Py_XINCREF(type); if (!value || value == Py_None) value = NULL; else Py_INCREF(value); if (!tb || tb == Py_None) tb = NULL; else { Py_INCREF(tb); if (!PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto raise_error; } } if (PyType_Check(type)) { #if CYTHON_COMPILING_IN_PYPY if (!value) { Py_INCREF(Py_None); value = Py_None; } #endif PyErr_NormalizeException(&type, &value, &tb); } else { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto raise_error; } value = type; type = (PyObject*) Py_TYPE(type); Py_INCREF(type); if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto raise_error; } } __Pyx_PyThreadState_assign __Pyx_ErrRestore(type, value, tb); return; raise_error: Py_XDECREF(value); Py_XDECREF(type); Py_XDECREF(tb); return; } #else static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { PyObject* owned_instance = NULL; if (tb == Py_None) { tb = 0; } else if (tb && !PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto bad; } if (value == Py_None) value = 0; if (PyExceptionInstance_Check(type)) { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto bad; } value = type; type = (PyObject*) Py_TYPE(value); } else if (PyExceptionClass_Check(type)) { PyObject *instance_class = NULL; if (value && PyExceptionInstance_Check(value)) { instance_class = (PyObject*) Py_TYPE(value); if (instance_class != type) { int is_subclass = PyObject_IsSubclass(instance_class, type); if (!is_subclass) { instance_class = NULL; } else if (unlikely(is_subclass == -1)) { goto bad; } else { type = instance_class; } } } if (!instance_class) { PyObject *args; if (!value) args = PyTuple_New(0); else if (PyTuple_Check(value)) { Py_INCREF(value); args = value; } else args = PyTuple_Pack(1, value); if (!args) goto bad; owned_instance = PyObject_Call(type, args, NULL); Py_DECREF(args); if (!owned_instance) goto bad; value = owned_instance; if (!PyExceptionInstance_Check(value)) { PyErr_Format(PyExc_TypeError, "calling %R should have returned an instance of " "BaseException, not %R", type, Py_TYPE(value)); goto bad; } } } else { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto bad; } if (cause) { PyObject *fixed_cause; if (cause == Py_None) { fixed_cause = NULL; } else if (PyExceptionClass_Check(cause)) { fixed_cause = PyObject_CallObject(cause, NULL); if (fixed_cause == NULL) goto bad; } else if (PyExceptionInstance_Check(cause)) { fixed_cause = cause; Py_INCREF(fixed_cause); } else { PyErr_SetString(PyExc_TypeError, "exception causes must derive from " "BaseException"); goto bad; } PyException_SetCause(value, fixed_cause); } PyErr_SetObject(type, value); if (tb) { #if CYTHON_COMPILING_IN_PYPY PyObject *tmp_type, *tmp_value, *tmp_tb; PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb); Py_INCREF(tb); PyErr_Restore(tmp_type, tmp_value, tb); Py_XDECREF(tmp_tb); #else PyThreadState *tstate = __Pyx_PyThreadState_Current; PyObject* tmp_tb = tstate->curexc_traceback; if (tb != tmp_tb) { Py_INCREF(tb); tstate->curexc_traceback = tb; Py_XDECREF(tmp_tb); } #endif } bad: Py_XDECREF(owned_instance); 
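/* [editor's note -- illustrative sketch, not Cython output] The Python 3
   __Pyx_Raise surrounding this point implements `raise exc from cause` by
   hand: instantiate the exception class if needed, attach the cause via
   PyException_SetCause, then set the error indicator. Reduced to the cause
   handling, assuming exc is already an instance (hypothetical name): */
#if 0
static void raise_from(PyObject *exc, PyObject *cause) {
    Py_INCREF(cause);
    PyException_SetCause(exc, cause);   /* steals the reference to cause */
    PyErr_SetObject((PyObject *)Py_TYPE(exc), exc);
}
#endif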
return; } #endif /* BytesEquals */ static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals) { #if CYTHON_COMPILING_IN_PYPY return PyObject_RichCompareBool(s1, s2, equals); #else if (s1 == s2) { return (equals == Py_EQ); } else if (PyBytes_CheckExact(s1) & PyBytes_CheckExact(s2)) { const char *ps1, *ps2; Py_ssize_t length = PyBytes_GET_SIZE(s1); if (length != PyBytes_GET_SIZE(s2)) return (equals == Py_NE); ps1 = PyBytes_AS_STRING(s1); ps2 = PyBytes_AS_STRING(s2); if (ps1[0] != ps2[0]) { return (equals == Py_NE); } else if (length == 1) { return (equals == Py_EQ); } else { int result; #if CYTHON_USE_UNICODE_INTERNALS Py_hash_t hash1, hash2; hash1 = ((PyBytesObject*)s1)->ob_shash; hash2 = ((PyBytesObject*)s2)->ob_shash; if (hash1 != hash2 && hash1 != -1 && hash2 != -1) { return (equals == Py_NE); } #endif result = memcmp(ps1, ps2, (size_t)length); return (equals == Py_EQ) ? (result == 0) : (result != 0); } } else if ((s1 == Py_None) & PyBytes_CheckExact(s2)) { return (equals == Py_NE); } else if ((s2 == Py_None) & PyBytes_CheckExact(s1)) { return (equals == Py_NE); } else { int result; PyObject* py_result = PyObject_RichCompare(s1, s2, equals); if (!py_result) return -1; result = __Pyx_PyObject_IsTrue(py_result); Py_DECREF(py_result); return result; } #endif } /* UnicodeEquals */ static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals) { #if CYTHON_COMPILING_IN_PYPY return PyObject_RichCompareBool(s1, s2, equals); #else #if PY_MAJOR_VERSION < 3 PyObject* owned_ref = NULL; #endif int s1_is_unicode, s2_is_unicode; if (s1 == s2) { goto return_eq; } s1_is_unicode = PyUnicode_CheckExact(s1); s2_is_unicode = PyUnicode_CheckExact(s2); #if PY_MAJOR_VERSION < 3 if ((s1_is_unicode & (!s2_is_unicode)) && PyString_CheckExact(s2)) { owned_ref = PyUnicode_FromObject(s2); if (unlikely(!owned_ref)) return -1; s2 = owned_ref; s2_is_unicode = 1; } else if ((s2_is_unicode & (!s1_is_unicode)) && PyString_CheckExact(s1)) { owned_ref = PyUnicode_FromObject(s1); if (unlikely(!owned_ref)) return -1; s1 = owned_ref; s1_is_unicode = 1; } else if (((!s2_is_unicode) & (!s1_is_unicode))) { return __Pyx_PyBytes_Equals(s1, s2, equals); } #endif if (s1_is_unicode & s2_is_unicode) { Py_ssize_t length; int kind; void *data1, *data2; if (unlikely(__Pyx_PyUnicode_READY(s1) < 0) || unlikely(__Pyx_PyUnicode_READY(s2) < 0)) return -1; length = __Pyx_PyUnicode_GET_LENGTH(s1); if (length != __Pyx_PyUnicode_GET_LENGTH(s2)) { goto return_ne; } #if CYTHON_USE_UNICODE_INTERNALS { Py_hash_t hash1, hash2; #if CYTHON_PEP393_ENABLED hash1 = ((PyASCIIObject*)s1)->hash; hash2 = ((PyASCIIObject*)s2)->hash; #else hash1 = ((PyUnicodeObject*)s1)->hash; hash2 = ((PyUnicodeObject*)s2)->hash; #endif if (hash1 != hash2 && hash1 != -1 && hash2 != -1) { goto return_ne; } } #endif kind = __Pyx_PyUnicode_KIND(s1); if (kind != __Pyx_PyUnicode_KIND(s2)) { goto return_ne; } data1 = __Pyx_PyUnicode_DATA(s1); data2 = __Pyx_PyUnicode_DATA(s2); if (__Pyx_PyUnicode_READ(kind, data1, 0) != __Pyx_PyUnicode_READ(kind, data2, 0)) { goto return_ne; } else if (length == 1) { goto return_eq; } else { int result = memcmp(data1, data2, (size_t)(length * kind)); #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif return (equals == Py_EQ) ? 
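/* [editor's note -- illustrative sketch, not Cython output] Both equality
   helpers here layer cheap rejects ahead of memcmp: pointer identity, then
   length, then the first character, then (on CPython) the cached hash. The
   same ladder for two raw byte buffers: */
#if 0
#include <string.h>

static int bytes_eq(const char *a, Py_ssize_t la,
                    const char *b, Py_ssize_t lb) {
    if (a == b)   return 1;            /* identity */
    if (la != lb) return 0;            /* length mismatch */
    if (la == 0)  return 1;
    if (a[0] != b[0]) return 0;        /* first-byte probe */
    return memcmp(a, b, (size_t)la) == 0;
}
#endif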
(result == 0) : (result != 0); } } else if ((s1 == Py_None) & s2_is_unicode) { goto return_ne; } else if ((s2 == Py_None) & s1_is_unicode) { goto return_ne; } else { int result; PyObject* py_result = PyObject_RichCompare(s1, s2, equals); #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif if (!py_result) return -1; result = __Pyx_PyObject_IsTrue(py_result); Py_DECREF(py_result); return result; } return_eq: #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif return (equals == Py_EQ); return_ne: #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif return (equals == Py_NE); #endif } /* GetAttr */ static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *o, PyObject *n) { #if CYTHON_USE_TYPE_SLOTS #if PY_MAJOR_VERSION >= 3 if (likely(PyUnicode_Check(n))) #else if (likely(PyString_Check(n))) #endif return __Pyx_PyObject_GetAttrStr(o, n); #endif return PyObject_GetAttr(o, n); } /* GetItemInt */ static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) { PyObject *r; if (!j) return NULL; r = PyObject_GetItem(o, j); Py_DECREF(j); return r; } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS Py_ssize_t wrapped_i = i; if (wraparound & unlikely(i < 0)) { wrapped_i += PyList_GET_SIZE(o); } if ((!boundscheck) || likely((0 <= wrapped_i) & (wrapped_i < PyList_GET_SIZE(o)))) { PyObject *r = PyList_GET_ITEM(o, wrapped_i); Py_INCREF(r); return r; } return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); #else return PySequence_GetItem(o, i); #endif } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS Py_ssize_t wrapped_i = i; if (wraparound & unlikely(i < 0)) { wrapped_i += PyTuple_GET_SIZE(o); } if ((!boundscheck) || likely((0 <= wrapped_i) & (wrapped_i < PyTuple_GET_SIZE(o)))) { PyObject *r = PyTuple_GET_ITEM(o, wrapped_i); Py_INCREF(r); return r; } return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); #else return PySequence_GetItem(o, i); #endif } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list, CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS && CYTHON_USE_TYPE_SLOTS if (is_list || PyList_CheckExact(o)) { Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyList_GET_SIZE(o); if ((!boundscheck) || (likely((n >= 0) & (n < PyList_GET_SIZE(o))))) { PyObject *r = PyList_GET_ITEM(o, n); Py_INCREF(r); return r; } } else if (PyTuple_CheckExact(o)) { Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? 
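/* [editor's note -- illustrative sketch, not Cython output] The GetItemInt
   family here compiles sequence[i] down to a direct PyList_GET_ITEM /
   PyTuple_GET_ITEM when the index is a known integer, doing Python's
   negative-index wraparound and bounds check inline. For a list
   (hypothetical name): */
#if 0
static PyObject *list_get(PyObject *list, Py_ssize_t i) {
    PyObject *r;
    Py_ssize_t n = PyList_GET_SIZE(list);
    if (i < 0) i += n;                       /* Python-style wraparound */
    if (i < 0 || i >= n) {
        PyErr_SetString(PyExc_IndexError, "list index out of range");
        return NULL;
    }
    r = PyList_GET_ITEM(list, i);            /* borrowed reference */
    Py_INCREF(r);
    return r;
}
#endif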
i : i + PyTuple_GET_SIZE(o); if ((!boundscheck) || likely((n >= 0) & (n < PyTuple_GET_SIZE(o)))) { PyObject *r = PyTuple_GET_ITEM(o, n); Py_INCREF(r); return r; } } else { PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence; if (likely(m && m->sq_item)) { if (wraparound && unlikely(i < 0) && likely(m->sq_length)) { Py_ssize_t l = m->sq_length(o); if (likely(l >= 0)) { i += l; } else { if (!PyErr_ExceptionMatches(PyExc_OverflowError)) return NULL; PyErr_Clear(); } } return m->sq_item(o, i); } } #else if (is_list || PySequence_Check(o)) { return PySequence_GetItem(o, i); } #endif return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); } /* ObjectGetItem */ #if CYTHON_USE_TYPE_SLOTS static PyObject *__Pyx_PyObject_GetIndex(PyObject *obj, PyObject* index) { PyObject *runerr; Py_ssize_t key_value; PySequenceMethods *m = Py_TYPE(obj)->tp_as_sequence; if (unlikely(!(m && m->sq_item))) { PyErr_Format(PyExc_TypeError, "'%.200s' object is not subscriptable", Py_TYPE(obj)->tp_name); return NULL; } key_value = __Pyx_PyIndex_AsSsize_t(index); if (likely(key_value != -1 || !(runerr = PyErr_Occurred()))) { return __Pyx_GetItemInt_Fast(obj, key_value, 0, 1, 1); } if (PyErr_GivenExceptionMatches(runerr, PyExc_OverflowError)) { PyErr_Clear(); PyErr_Format(PyExc_IndexError, "cannot fit '%.200s' into an index-sized integer", Py_TYPE(index)->tp_name); } return NULL; } static PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key) { PyMappingMethods *m = Py_TYPE(obj)->tp_as_mapping; if (likely(m && m->mp_subscript)) { return m->mp_subscript(obj, key); } return __Pyx_PyObject_GetIndex(obj, key); } #endif /* decode_c_string */ static CYTHON_INLINE PyObject* __Pyx_decode_c_string( const char* cstring, Py_ssize_t start, Py_ssize_t stop, const char* encoding, const char* errors, PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)) { Py_ssize_t length; if (unlikely((start < 0) | (stop < 0))) { size_t slen = strlen(cstring); if (unlikely(slen > (size_t) PY_SSIZE_T_MAX)) { PyErr_SetString(PyExc_OverflowError, "c-string too long to convert to Python"); return NULL; } length = (Py_ssize_t) slen; if (start < 0) { start += length; if (start < 0) start = 0; } if (stop < 0) stop += length; } length = stop - start; if (unlikely(length <= 0)) return PyUnicode_FromUnicode(NULL, 0); cstring += start; if (decode_func) { return decode_func(cstring, length, errors); } else { return PyUnicode_Decode(cstring, length, encoding, errors); } } /* PyErrExceptionMatches */ #if CYTHON_FAST_THREAD_STATE static int __Pyx_PyErr_ExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { Py_ssize_t i, n; n = PyTuple_GET_SIZE(tuple); #if PY_MAJOR_VERSION >= 3 for (i=0; i<n; i++) { if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1; } #endif for (i=0; i<n; i++) { if (__Pyx_PyErr_GivenExceptionMatches(exc_type, PyTuple_GET_ITEM(tuple, i))) return 1; } return 0; } static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err) { PyObject *exc_type = tstate->curexc_type; if (exc_type == err) return 1; if (unlikely(!exc_type)) return 0; if (unlikely(PyTuple_Check(err))) return __Pyx_PyErr_ExceptionMatchesTuple(exc_type, err); return __Pyx_PyErr_GivenExceptionMatches(exc_type, err); } #endif /* GetAttr3 */ static PyObject *__Pyx_GetAttr3Default(PyObject *d) { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign if (unlikely(!__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError))) return NULL; __Pyx_PyErr_Clear(); Py_INCREF(d); return d; } static CYTHON_INLINE PyObject 
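/* [editor's note -- illustrative sketch, not Cython output] __Pyx_GetAttr3,
   defined just below, is getattr(o, n, d): it forwards the lookup and
   converts exactly AttributeError into the default, letting every other
   exception propagate. The same shape (hypothetical name): */
#if 0
static PyObject *getattr3(PyObject *o, PyObject *n, PyObject *d) {
    PyObject *r = PyObject_GetAttr(o, n);
    if (r) return r;
    if (!PyErr_ExceptionMatches(PyExc_AttributeError))
        return NULL;                  /* propagate non-AttributeError */
    PyErr_Clear();
    Py_INCREF(d);                     /* hand back a new reference */
    return d;
}
#endif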
*__Pyx_GetAttr3(PyObject *o, PyObject *n, PyObject *d) { PyObject *r = __Pyx_GetAttr(o, n); return (likely(r)) ? r : __Pyx_GetAttr3Default(d); } /* RaiseTooManyValuesToUnpack */ static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) { PyErr_Format(PyExc_ValueError, "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected); } /* RaiseNeedMoreValuesToUnpack */ static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) { PyErr_Format(PyExc_ValueError, "need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack", index, (index == 1) ? "" : "s"); } /* RaiseNoneIterError */ static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); } /* ExtTypeTest */ static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) { if (unlikely(!type)) { PyErr_SetString(PyExc_SystemError, "Missing type object"); return 0; } if (likely(__Pyx_TypeCheck(obj, type))) return 1; PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s", Py_TYPE(obj)->tp_name, type->tp_name); return 0; } /* SaveResetException */ #if CYTHON_FAST_THREAD_STATE static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { #if PY_VERSION_HEX >= 0x030700A3 *type = tstate->exc_state.exc_type; *value = tstate->exc_state.exc_value; *tb = tstate->exc_state.exc_traceback; #else *type = tstate->exc_type; *value = tstate->exc_value; *tb = tstate->exc_traceback; #endif Py_XINCREF(*type); Py_XINCREF(*value); Py_XINCREF(*tb); } static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; #if PY_VERSION_HEX >= 0x030700A3 tmp_type = tstate->exc_state.exc_type; tmp_value = tstate->exc_state.exc_value; tmp_tb = tstate->exc_state.exc_traceback; tstate->exc_state.exc_type = type; tstate->exc_state.exc_value = value; tstate->exc_state.exc_traceback = tb; #else tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = type; tstate->exc_value = value; tstate->exc_traceback = tb; #endif Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); } #endif /* GetException */ #if CYTHON_FAST_THREAD_STATE static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { #else static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb) { #endif PyObject *local_type, *local_value, *local_tb; #if CYTHON_FAST_THREAD_STATE PyObject *tmp_type, *tmp_value, *tmp_tb; local_type = tstate->curexc_type; local_value = tstate->curexc_value; local_tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; #else PyErr_Fetch(&local_type, &local_value, &local_tb); #endif PyErr_NormalizeException(&local_type, &local_value, &local_tb); #if CYTHON_FAST_THREAD_STATE if (unlikely(tstate->curexc_type)) #else if (unlikely(PyErr_Occurred())) #endif goto bad; #if PY_MAJOR_VERSION >= 3 if (local_tb) { if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0)) goto bad; } #endif Py_XINCREF(local_tb); Py_XINCREF(local_type); Py_XINCREF(local_value); *type = local_type; *value = local_value; *tb = local_tb; #if CYTHON_FAST_THREAD_STATE #if PY_VERSION_HEX >= 0x030700A3 tmp_type = tstate->exc_state.exc_type; tmp_value = tstate->exc_state.exc_value; tmp_tb = tstate->exc_state.exc_traceback; 
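/* [editor's note -- illustrative sketch, not Cython output] The exception
   save/reset/swap helpers in this region all follow one discipline: read the
   three thread-state slots into temporaries, install the new triple, and only
   then DECREF the old values -- so the state is never inconsistent at a point
   where a destructor might run. The core move, per slot: */
#if 0
static void swap_slot(PyObject **slot, PyObject *new_val) {
    PyObject *old = *slot;   /* read the old value first */
    *slot = new_val;         /* install the replacement */
    Py_XDECREF(old);         /* release last; DECREF can run arbitrary code */
}
#endif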
tstate->exc_state.exc_type = local_type; tstate->exc_state.exc_value = local_value; tstate->exc_state.exc_traceback = local_tb; #else tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = local_type; tstate->exc_value = local_value; tstate->exc_traceback = local_tb; #endif Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); #else PyErr_SetExcInfo(local_type, local_value, local_tb); #endif return 0; bad: *type = 0; *value = 0; *tb = 0; Py_XDECREF(local_type); Py_XDECREF(local_value); Py_XDECREF(local_tb); return -1; } /* SwapException */ #if CYTHON_FAST_THREAD_STATE static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; #if PY_VERSION_HEX >= 0x030700A3 tmp_type = tstate->exc_state.exc_type; tmp_value = tstate->exc_state.exc_value; tmp_tb = tstate->exc_state.exc_traceback; tstate->exc_state.exc_type = *type; tstate->exc_state.exc_value = *value; tstate->exc_state.exc_traceback = *tb; #else tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = *type; tstate->exc_value = *value; tstate->exc_traceback = *tb; #endif *type = tmp_type; *value = tmp_value; *tb = tmp_tb; } #else static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; PyErr_GetExcInfo(&tmp_type, &tmp_value, &tmp_tb); PyErr_SetExcInfo(*type, *value, *tb); *type = tmp_type; *value = tmp_value; *tb = tmp_tb; } #endif /* Import */ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) { PyObject *empty_list = 0; PyObject *module = 0; PyObject *global_dict = 0; PyObject *empty_dict = 0; PyObject *list; #if PY_MAJOR_VERSION < 3 PyObject *py_import; py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import); if (!py_import) goto bad; #endif if (from_list) list = from_list; else { empty_list = PyList_New(0); if (!empty_list) goto bad; list = empty_list; } global_dict = PyModule_GetDict(__pyx_m); if (!global_dict) goto bad; empty_dict = PyDict_New(); if (!empty_dict) goto bad; { #if PY_MAJOR_VERSION >= 3 if (level == -1) { if (strchr(__Pyx_MODULE_NAME, '.')) { module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, 1); if (!module) { if (!PyErr_ExceptionMatches(PyExc_ImportError)) goto bad; PyErr_Clear(); } } level = 0; } #endif if (!module) { #if PY_MAJOR_VERSION < 3 PyObject *py_level = PyInt_FromLong(level); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, NULL); Py_DECREF(py_level); #else module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, level); #endif } } bad: #if PY_MAJOR_VERSION < 3 Py_XDECREF(py_import); #endif Py_XDECREF(empty_list); Py_XDECREF(empty_dict); return module; } /* FastTypeChecks */ #if CYTHON_COMPILING_IN_CPYTHON static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) { while (a) { a = a->tp_base; if (a == b) return 1; } return b == &PyBaseObject_Type; } static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) { PyObject *mro; if (a == b) return 1; mro = a->tp_mro; if (likely(mro)) { Py_ssize_t i, n; n = PyTuple_GET_SIZE(mro); for (i = 0; i < n; i++) { if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b) return 1; } return 0; } return __Pyx_InBases(a, b); } #if PY_MAJOR_VERSION == 2 static int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject 
*err, PyObject* exc_type1, PyObject* exc_type2) { PyObject *exception, *value, *tb; int res; __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ErrFetch(&exception, &value, &tb); res = exc_type1 ? PyObject_IsSubclass(err, exc_type1) : 0; if (unlikely(res == -1)) { PyErr_WriteUnraisable(err); res = 0; } if (!res) { res = PyObject_IsSubclass(err, exc_type2); if (unlikely(res == -1)) { PyErr_WriteUnraisable(err); res = 0; } } __Pyx_ErrRestore(exception, value, tb); return res; } #else static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) { int res = exc_type1 ? __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type1) : 0; if (!res) { res = __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2); } return res; } #endif static int __Pyx_PyErr_GivenExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { Py_ssize_t i, n; assert(PyExceptionClass_Check(exc_type)); n = PyTuple_GET_SIZE(tuple); #if PY_MAJOR_VERSION >= 3 for (i=0; i<n; i++) { if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1; } #endif for (i=0; i<n; i++) { PyObject *t = PyTuple_GET_ITEM(tuple, i); #if PY_MAJOR_VERSION < 3 if (likely(exc_type == t)) return 1; #endif if (likely(PyExceptionClass_Check(t))) { if (__Pyx_inner_PyErr_GivenExceptionMatches2(exc_type, NULL, t)) return 1; } else { } } return 0; } static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject* exc_type) { if (likely(err == exc_type)) return 1; if (likely(PyExceptionClass_Check(err))) { if (likely(PyExceptionClass_Check(exc_type))) { return __Pyx_inner_PyErr_GivenExceptionMatches2(err, NULL, exc_type); } else if (likely(PyTuple_Check(exc_type))) { return __Pyx_PyErr_GivenExceptionMatchesTuple(err, exc_type); } else { } } return PyErr_GivenExceptionMatches(err, exc_type); } static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *exc_type1, PyObject *exc_type2) { assert(PyExceptionClass_Check(exc_type1)); assert(PyExceptionClass_Check(exc_type2)); if (likely(err == exc_type1 || err == exc_type2)) return 1; if (likely(PyExceptionClass_Check(err))) { return __Pyx_inner_PyErr_GivenExceptionMatches2(err, exc_type1, exc_type2); } return (PyErr_GivenExceptionMatches(err, exc_type1) || PyErr_GivenExceptionMatches(err, exc_type2)); } #endif /* PyIntBinop */ #if !CYTHON_COMPILING_IN_PYPY static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, CYTHON_UNUSED long intval, CYTHON_UNUSED int inplace) { #if PY_MAJOR_VERSION < 3 if (likely(PyInt_CheckExact(op1))) { const long b = intval; long x; long a = PyInt_AS_LONG(op1); x = (long)((unsigned long)a + b); if (likely((x^a) >= 0 || (x^b) >= 0)) return PyInt_FromLong(x); return PyLong_Type.tp_as_number->nb_add(op1, op2); } #endif #if CYTHON_USE_PYLONG_INTERNALS if (likely(PyLong_CheckExact(op1))) { const long b = intval; long a, x; #ifdef HAVE_LONG_LONG const PY_LONG_LONG llb = intval; PY_LONG_LONG lla, llx; #endif const digit* digits = ((PyLongObject*)op1)->ob_digit; const Py_ssize_t size = Py_SIZE(op1); if (likely(__Pyx_sst_abs(size) <= 1)) { a = likely(size) ? 
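/* [editor's note -- illustrative sketch, not Cython output] The PyInt fast
   path above uses the classic portable overflow test: add in unsigned
   arithmetic (well-defined on wraparound), then accept the result only if
   its sign agrees with at least one operand. As a standalone helper
   (hypothetical name): */
#if 0
static int checked_add(long a, long b, long *out) {
    long x = (long)((unsigned long)a + (unsigned long)b);
    if ((x ^ a) >= 0 || (x ^ b) >= 0) {   /* sign matches an operand: OK */
        *out = x;
        return 1;
    }
    return 0;                             /* overflow: take the slow path */
}
#endif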
digits[0] : 0; if (size == -1) a = -a; } else { switch (size) { case -2: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { a = -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { lla = -(PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case 2: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { a = (long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { lla = (PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case -3: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { a = -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { lla = -(PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case 3: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { a = (long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { lla = (PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case -4: if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { a = -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { lla = -(PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case 4: if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { a = (long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { lla = (PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; default: return PyLong_Type.tp_as_number->nb_add(op1, op2); } } x = a + b; return PyLong_FromLong(x); #ifdef HAVE_LONG_LONG long_long: llx = lla + llb; return PyLong_FromLongLong(llx); #endif } #endif if (PyFloat_CheckExact(op1)) { const long b = intval; double a = PyFloat_AS_DOUBLE(op1); double result; PyFPE_START_PROTECT("add", return NULL) result = ((double)a) + (double)b; PyFPE_END_PROTECT(result) return PyFloat_FromDouble(result); } return (inplace ? 
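/* [editor's note -- illustrative sketch, not Cython output] The switch above
   rebuilds a C long (or long long) from a small PyLong's internal digit
   array: the magnitude lives in 15- or 30-bit digits and the sign rides on
   Py_SIZE. A two-digit reconstruction in isolation (digit and PyLong_SHIFT
   come from CPython's longintrepr.h; assumes the value fits in a long): */
#if 0
static long two_digit_value(const digit *d, Py_ssize_t size) {
    unsigned long v = ((unsigned long)d[1] << PyLong_SHIFT)
                    | (unsigned long)d[0];
    return (size < 0) ? -(long)v : (long)v;
}
#endif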
PyNumber_InPlaceAdd : PyNumber_Add)(op1, op2);
}
#endif

/* None */
static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname) {
    PyErr_Format(PyExc_UnboundLocalError, "local variable '%s' referenced before assignment", varname);
}

/* WriteUnraisableException */
static void __Pyx_WriteUnraisable(const char *name, CYTHON_UNUSED int clineno,
                                  CYTHON_UNUSED int lineno, CYTHON_UNUSED const char *filename,
                                  int full_traceback, CYTHON_UNUSED int nogil) {
    PyObject *old_exc, *old_val, *old_tb;
    PyObject *ctx;
    __Pyx_PyThreadState_declare
#ifdef WITH_THREAD
    PyGILState_STATE state;
    if (nogil)
        state = PyGILState_Ensure();
#ifdef _MSC_VER
    else
        state = (PyGILState_STATE)-1;
#endif
#endif
    __Pyx_PyThreadState_assign
    __Pyx_ErrFetch(&old_exc, &old_val, &old_tb);
    if (full_traceback) {
        Py_XINCREF(old_exc);
        Py_XINCREF(old_val);
        Py_XINCREF(old_tb);
        __Pyx_ErrRestore(old_exc, old_val, old_tb);
        PyErr_PrintEx(1);
    }
#if PY_MAJOR_VERSION < 3
    ctx = PyString_FromString(name);
#else
    ctx = PyUnicode_FromString(name);
#endif
    __Pyx_ErrRestore(old_exc, old_val, old_tb);
    if (!ctx) {
        PyErr_WriteUnraisable(Py_None);
    } else {
        PyErr_WriteUnraisable(ctx);
        Py_DECREF(ctx);
    }
#ifdef WITH_THREAD
    if (nogil)
        PyGILState_Release(state);
#endif
}

/* ImportFrom */
static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name) {
    PyObject* value = __Pyx_PyObject_GetAttrStr(module, name);
    if (unlikely(!value) && PyErr_ExceptionMatches(PyExc_AttributeError)) {
        PyErr_Format(PyExc_ImportError,
#if PY_MAJOR_VERSION < 3
                     "cannot import name %.230s", PyString_AS_STRING(name));
#else
                     "cannot import name %S", name);
#endif
    }
    return value;
}

/* HasAttr */
static CYTHON_INLINE int __Pyx_HasAttr(PyObject *o, PyObject *n) {
    PyObject *r;
    if (unlikely(!__Pyx_PyBaseString_Check(n))) {
        PyErr_SetString(PyExc_TypeError,
                        "hasattr(): attribute name must be string");
        return -1;
    }
    r = __Pyx_GetAttr(o, n);
    if (unlikely(!r)) {
        PyErr_Clear();
        return 0;
    } else {
        Py_DECREF(r);
        return 1;
    }
}

/* PyObject_GenericGetAttrNoDict */
#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
static PyObject *__Pyx_RaiseGenericGetAttributeError(PyTypeObject *tp, PyObject *attr_name) {
    PyErr_Format(PyExc_AttributeError,
#if PY_MAJOR_VERSION >= 3
                 "'%.50s' object has no attribute '%U'",
                 tp->tp_name, attr_name);
#else
                 "'%.50s' object has no attribute '%.400s'",
                 tp->tp_name, PyString_AS_STRING(attr_name));
#endif
    return NULL;
}
static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name) {
    PyObject *descr;
    PyTypeObject *tp = Py_TYPE(obj);
    if (unlikely(!PyString_Check(attr_name))) {
        return PyObject_GenericGetAttr(obj, attr_name);
    }
    assert(!tp->tp_dictoffset);
    descr = _PyType_Lookup(tp, attr_name);
    if (unlikely(!descr)) {
        return __Pyx_RaiseGenericGetAttributeError(tp, attr_name);
    }
    Py_INCREF(descr);
#if PY_MAJOR_VERSION < 3
    if (likely(PyType_HasFeature(Py_TYPE(descr), Py_TPFLAGS_HAVE_CLASS)))
#endif
    {
        descrgetfunc f = Py_TYPE(descr)->tp_descr_get;
        if (unlikely(f)) {
            PyObject *res = f(descr, obj, (PyObject *)tp);
            Py_DECREF(descr);
            return res;
        }
    }
    return descr;
}
#endif

/* PyObject_GenericGetAttr */
#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name) {
    if (unlikely(Py_TYPE(obj)->tp_dictoffset)) {
        return PyObject_GenericGetAttr(obj, attr_name);
    }
    return __Pyx_PyObject_GenericGetAttrNoDict(obj, attr_name);
}
#endif

/* SetVTable */
static int __Pyx_SetVtable(PyObject *dict, void *vtable) {
#if PY_VERSION_HEX >= 0x02070000
    PyObject *ob = PyCapsule_New(vtable, 0, 0);
#else
    PyObject *ob = PyCObject_FromVoidPtr(vtable, 0);
#endif
    if (!ob)
        goto bad;
    if (PyDict_SetItem(dict, __pyx_n_s_pyx_vtable, ob) < 0)
        goto bad;
    Py_DECREF(ob);
    return 0;
bad:
    Py_XDECREF(ob);
    return -1;
}

/* SetupReduce */
static int __Pyx_setup_reduce_is_named(PyObject* meth, PyObject* name) {
    int ret;
    PyObject *name_attr;
    name_attr = __Pyx_PyObject_GetAttrStr(meth, __pyx_n_s_name_2);
    if (likely(name_attr)) {
        ret = PyObject_RichCompareBool(name_attr, name, Py_EQ);
    } else {
        ret = -1;
    }
    if (unlikely(ret < 0)) {
        PyErr_Clear();
        ret = 0;
    }
    Py_XDECREF(name_attr);
    return ret;
}
static int __Pyx_setup_reduce(PyObject* type_obj) {
    int ret = 0;
    PyObject *object_reduce = NULL;
    PyObject *object_reduce_ex = NULL;
    PyObject *reduce = NULL;
    PyObject *reduce_ex = NULL;
    PyObject *reduce_cython = NULL;
    PyObject *setstate = NULL;
    PyObject *setstate_cython = NULL;
#if CYTHON_USE_PYTYPE_LOOKUP
    if (_PyType_Lookup((PyTypeObject*)type_obj, __pyx_n_s_getstate)) goto GOOD;
#else
    if (PyObject_HasAttr(type_obj, __pyx_n_s_getstate)) goto GOOD;
#endif
#if CYTHON_USE_PYTYPE_LOOKUP
    object_reduce_ex = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto BAD;
#else
    object_reduce_ex = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto BAD;
#endif
    reduce_ex = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce_ex);
    if (unlikely(!reduce_ex)) goto BAD;
    if (reduce_ex == object_reduce_ex) {
#if CYTHON_USE_PYTYPE_LOOKUP
        object_reduce = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto BAD;
#else
        object_reduce = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto BAD;
#endif
        reduce = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce);
        if (unlikely(!reduce)) goto BAD;
        if (reduce == object_reduce || __Pyx_setup_reduce_is_named(reduce, __pyx_n_s_reduce_cython)) {
            reduce_cython = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce_cython);
            if (unlikely(!reduce_cython)) goto BAD;
            ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce, reduce_cython);
            if (unlikely(ret < 0)) goto BAD;
            ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce_cython);
            if (unlikely(ret < 0)) goto BAD;
            setstate = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_setstate);
            if (!setstate) PyErr_Clear();
            if (!setstate || __Pyx_setup_reduce_is_named(setstate, __pyx_n_s_setstate_cython)) {
                setstate_cython = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_setstate_cython);
                if (unlikely(!setstate_cython)) goto BAD;
                ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate, setstate_cython);
                if (unlikely(ret < 0)) goto BAD;
                ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate_cython);
                if (unlikely(ret < 0)) goto BAD;
            }
            PyType_Modified((PyTypeObject*)type_obj);
        }
    }
    goto GOOD;
BAD:
    if (!PyErr_Occurred())
        PyErr_Format(PyExc_RuntimeError, "Unable to initialize pickling for %s", ((PyTypeObject*)type_obj)->tp_name);
    ret = -1;
GOOD:
#if !CYTHON_USE_PYTYPE_LOOKUP
    Py_XDECREF(object_reduce);
    Py_XDECREF(object_reduce_ex);
#endif
    Py_XDECREF(reduce);
    Py_XDECREF(reduce_ex);
    Py_XDECREF(reduce_cython);
    Py_XDECREF(setstate);
    Py_XDECREF(setstate_cython);
    return ret;
}

/* FetchCommonType */
static PyTypeObject* __Pyx_FetchCommonType(PyTypeObject* type) {
    PyObject* fake_module;
    PyTypeObject* cached_type = NULL;
    fake_module = PyImport_AddModule((char*) "_cython_" CYTHON_ABI);
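    /* Descriptive note: every Cython-generated module built against the same
       CYTHON_ABI registers its shared helper types (such as the cyfunction
       type below) as attributes of one process-wide "_cython_<ABI>" module.
       The lookup that follows either returns a previously published type,
       after checking that it really is a type object of the expected
       tp_basicsize, or readies this module's copy and publishes it. */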
if (!fake_module) return NULL; Py_INCREF(fake_module); cached_type = (PyTypeObject*) PyObject_GetAttrString(fake_module, type->tp_name); if (cached_type) { if (!PyType_Check((PyObject*)cached_type)) { PyErr_Format(PyExc_TypeError, "Shared Cython type %.200s is not a type object", type->tp_name); goto bad; } if (cached_type->tp_basicsize != type->tp_basicsize) { PyErr_Format(PyExc_TypeError, "Shared Cython type %.200s has the wrong size, try recompiling", type->tp_name); goto bad; } } else { if (!PyErr_ExceptionMatches(PyExc_AttributeError)) goto bad; PyErr_Clear(); if (PyType_Ready(type) < 0) goto bad; if (PyObject_SetAttrString(fake_module, type->tp_name, (PyObject*) type) < 0) goto bad; Py_INCREF(type); cached_type = type; } done: Py_DECREF(fake_module); return cached_type; bad: Py_XDECREF(cached_type); cached_type = NULL; goto done; } /* CythonFunction */ #include <structmember.h> static PyObject * __Pyx_CyFunction_get_doc(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *closure) { if (unlikely(op->func_doc == NULL)) { if (op->func.m_ml->ml_doc) { #if PY_MAJOR_VERSION >= 3 op->func_doc = PyUnicode_FromString(op->func.m_ml->ml_doc); #else op->func_doc = PyString_FromString(op->func.m_ml->ml_doc); #endif if (unlikely(op->func_doc == NULL)) return NULL; } else { Py_INCREF(Py_None); return Py_None; } } Py_INCREF(op->func_doc); return op->func_doc; } static int __Pyx_CyFunction_set_doc(__pyx_CyFunctionObject *op, PyObject *value) { PyObject *tmp = op->func_doc; if (value == NULL) { value = Py_None; } Py_INCREF(value); op->func_doc = value; Py_XDECREF(tmp); return 0; } static PyObject * __Pyx_CyFunction_get_name(__pyx_CyFunctionObject *op) { if (unlikely(op->func_name == NULL)) { #if PY_MAJOR_VERSION >= 3 op->func_name = PyUnicode_InternFromString(op->func.m_ml->ml_name); #else op->func_name = PyString_InternFromString(op->func.m_ml->ml_name); #endif if (unlikely(op->func_name == NULL)) return NULL; } Py_INCREF(op->func_name); return op->func_name; } static int __Pyx_CyFunction_set_name(__pyx_CyFunctionObject *op, PyObject *value) { PyObject *tmp; #if PY_MAJOR_VERSION >= 3 if (unlikely(value == NULL || !PyUnicode_Check(value))) { #else if (unlikely(value == NULL || !PyString_Check(value))) { #endif PyErr_SetString(PyExc_TypeError, "__name__ must be set to a string object"); return -1; } tmp = op->func_name; Py_INCREF(value); op->func_name = value; Py_XDECREF(tmp); return 0; } static PyObject * __Pyx_CyFunction_get_qualname(__pyx_CyFunctionObject *op) { Py_INCREF(op->func_qualname); return op->func_qualname; } static int __Pyx_CyFunction_set_qualname(__pyx_CyFunctionObject *op, PyObject *value) { PyObject *tmp; #if PY_MAJOR_VERSION >= 3 if (unlikely(value == NULL || !PyUnicode_Check(value))) { #else if (unlikely(value == NULL || !PyString_Check(value))) { #endif PyErr_SetString(PyExc_TypeError, "__qualname__ must be set to a string object"); return -1; } tmp = op->func_qualname; Py_INCREF(value); op->func_qualname = value; Py_XDECREF(tmp); return 0; } static PyObject * __Pyx_CyFunction_get_self(__pyx_CyFunctionObject *m, CYTHON_UNUSED void *closure) { PyObject *self; self = m->func_closure; if (self == NULL) self = Py_None; Py_INCREF(self); return self; } static PyObject * __Pyx_CyFunction_get_dict(__pyx_CyFunctionObject *op) { if (unlikely(op->func_dict == NULL)) { op->func_dict = PyDict_New(); if (unlikely(op->func_dict == NULL)) return NULL; } Py_INCREF(op->func_dict); return op->func_dict; } static int __Pyx_CyFunction_set_dict(__pyx_CyFunctionObject *op, PyObject *value) { PyObject *tmp; 
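    /* Descriptive note: this setter mirrors CPython's semantics for a
       function's __dict__: deleting the attribute (value == NULL) and
       assigning a non-dict are both rejected with TypeError, and a valid
       dict is swapped in by INCREF'ing the new value before XDECREF'ing the
       old one, so the slot never holds a stale pointer. */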
if (unlikely(value == NULL)) { PyErr_SetString(PyExc_TypeError, "function's dictionary may not be deleted"); return -1; } if (unlikely(!PyDict_Check(value))) { PyErr_SetString(PyExc_TypeError, "setting function's dictionary to a non-dict"); return -1; } tmp = op->func_dict; Py_INCREF(value); op->func_dict = value; Py_XDECREF(tmp); return 0; } static PyObject * __Pyx_CyFunction_get_globals(__pyx_CyFunctionObject *op) { Py_INCREF(op->func_globals); return op->func_globals; } static PyObject * __Pyx_CyFunction_get_closure(CYTHON_UNUSED __pyx_CyFunctionObject *op) { Py_INCREF(Py_None); return Py_None; } static PyObject * __Pyx_CyFunction_get_code(__pyx_CyFunctionObject *op) { PyObject* result = (op->func_code) ? op->func_code : Py_None; Py_INCREF(result); return result; } static int __Pyx_CyFunction_init_defaults(__pyx_CyFunctionObject *op) { int result = 0; PyObject *res = op->defaults_getter((PyObject *) op); if (unlikely(!res)) return -1; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS op->defaults_tuple = PyTuple_GET_ITEM(res, 0); Py_INCREF(op->defaults_tuple); op->defaults_kwdict = PyTuple_GET_ITEM(res, 1); Py_INCREF(op->defaults_kwdict); #else op->defaults_tuple = PySequence_ITEM(res, 0); if (unlikely(!op->defaults_tuple)) result = -1; else { op->defaults_kwdict = PySequence_ITEM(res, 1); if (unlikely(!op->defaults_kwdict)) result = -1; } #endif Py_DECREF(res); return result; } static int __Pyx_CyFunction_set_defaults(__pyx_CyFunctionObject *op, PyObject* value) { PyObject* tmp; if (!value) { value = Py_None; } else if (value != Py_None && !PyTuple_Check(value)) { PyErr_SetString(PyExc_TypeError, "__defaults__ must be set to a tuple object"); return -1; } Py_INCREF(value); tmp = op->defaults_tuple; op->defaults_tuple = value; Py_XDECREF(tmp); return 0; } static PyObject * __Pyx_CyFunction_get_defaults(__pyx_CyFunctionObject *op) { PyObject* result = op->defaults_tuple; if (unlikely(!result)) { if (op->defaults_getter) { if (__Pyx_CyFunction_init_defaults(op) < 0) return NULL; result = op->defaults_tuple; } else { result = Py_None; } } Py_INCREF(result); return result; } static int __Pyx_CyFunction_set_kwdefaults(__pyx_CyFunctionObject *op, PyObject* value) { PyObject* tmp; if (!value) { value = Py_None; } else if (value != Py_None && !PyDict_Check(value)) { PyErr_SetString(PyExc_TypeError, "__kwdefaults__ must be set to a dict object"); return -1; } Py_INCREF(value); tmp = op->defaults_kwdict; op->defaults_kwdict = value; Py_XDECREF(tmp); return 0; } static PyObject * __Pyx_CyFunction_get_kwdefaults(__pyx_CyFunctionObject *op) { PyObject* result = op->defaults_kwdict; if (unlikely(!result)) { if (op->defaults_getter) { if (__Pyx_CyFunction_init_defaults(op) < 0) return NULL; result = op->defaults_kwdict; } else { result = Py_None; } } Py_INCREF(result); return result; } static int __Pyx_CyFunction_set_annotations(__pyx_CyFunctionObject *op, PyObject* value) { PyObject* tmp; if (!value || value == Py_None) { value = NULL; } else if (!PyDict_Check(value)) { PyErr_SetString(PyExc_TypeError, "__annotations__ must be set to a dict object"); return -1; } Py_XINCREF(value); tmp = op->func_annotations; op->func_annotations = value; Py_XDECREF(tmp); return 0; } static PyObject * __Pyx_CyFunction_get_annotations(__pyx_CyFunctionObject *op) { PyObject* result = op->func_annotations; if (unlikely(!result)) { result = PyDict_New(); if (unlikely(!result)) return NULL; op->func_annotations = result; } Py_INCREF(result); return result; } static PyGetSetDef __pyx_CyFunction_getsets[] = { 
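    /* Descriptive note: each descriptor below is exposed twice, under the
       historical Python 2 "func_*" name and the modern dunder name, sharing
       one getter/setter pair; the table ends with the all-zero sentinel
       entry that PyGetSetDef arrays require. */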
{(char *) "func_doc", (getter)__Pyx_CyFunction_get_doc, (setter)__Pyx_CyFunction_set_doc, 0, 0}, {(char *) "__doc__", (getter)__Pyx_CyFunction_get_doc, (setter)__Pyx_CyFunction_set_doc, 0, 0}, {(char *) "func_name", (getter)__Pyx_CyFunction_get_name, (setter)__Pyx_CyFunction_set_name, 0, 0}, {(char *) "__name__", (getter)__Pyx_CyFunction_get_name, (setter)__Pyx_CyFunction_set_name, 0, 0}, {(char *) "__qualname__", (getter)__Pyx_CyFunction_get_qualname, (setter)__Pyx_CyFunction_set_qualname, 0, 0}, {(char *) "__self__", (getter)__Pyx_CyFunction_get_self, 0, 0, 0}, {(char *) "func_dict", (getter)__Pyx_CyFunction_get_dict, (setter)__Pyx_CyFunction_set_dict, 0, 0}, {(char *) "__dict__", (getter)__Pyx_CyFunction_get_dict, (setter)__Pyx_CyFunction_set_dict, 0, 0}, {(char *) "func_globals", (getter)__Pyx_CyFunction_get_globals, 0, 0, 0}, {(char *) "__globals__", (getter)__Pyx_CyFunction_get_globals, 0, 0, 0}, {(char *) "func_closure", (getter)__Pyx_CyFunction_get_closure, 0, 0, 0}, {(char *) "__closure__", (getter)__Pyx_CyFunction_get_closure, 0, 0, 0}, {(char *) "func_code", (getter)__Pyx_CyFunction_get_code, 0, 0, 0}, {(char *) "__code__", (getter)__Pyx_CyFunction_get_code, 0, 0, 0}, {(char *) "func_defaults", (getter)__Pyx_CyFunction_get_defaults, (setter)__Pyx_CyFunction_set_defaults, 0, 0}, {(char *) "__defaults__", (getter)__Pyx_CyFunction_get_defaults, (setter)__Pyx_CyFunction_set_defaults, 0, 0}, {(char *) "__kwdefaults__", (getter)__Pyx_CyFunction_get_kwdefaults, (setter)__Pyx_CyFunction_set_kwdefaults, 0, 0}, {(char *) "__annotations__", (getter)__Pyx_CyFunction_get_annotations, (setter)__Pyx_CyFunction_set_annotations, 0, 0}, {0, 0, 0, 0, 0} }; static PyMemberDef __pyx_CyFunction_members[] = { {(char *) "__module__", T_OBJECT, offsetof(PyCFunctionObject, m_module), PY_WRITE_RESTRICTED, 0}, {0, 0, 0, 0, 0} }; static PyObject * __Pyx_CyFunction_reduce(__pyx_CyFunctionObject *m, CYTHON_UNUSED PyObject *args) { #if PY_MAJOR_VERSION >= 3 return PyUnicode_FromString(m->func.m_ml->ml_name); #else return PyString_FromString(m->func.m_ml->ml_name); #endif } static PyMethodDef __pyx_CyFunction_methods[] = { {"__reduce__", (PyCFunction)__Pyx_CyFunction_reduce, METH_VARARGS, 0}, {0, 0, 0, 0} }; #if PY_VERSION_HEX < 0x030500A0 #define __Pyx_CyFunction_weakreflist(cyfunc) ((cyfunc)->func_weakreflist) #else #define __Pyx_CyFunction_weakreflist(cyfunc) ((cyfunc)->func.m_weakreflist) #endif static PyObject *__Pyx_CyFunction_New(PyTypeObject *type, PyMethodDef *ml, int flags, PyObject* qualname, PyObject *closure, PyObject *module, PyObject* globals, PyObject* code) { __pyx_CyFunctionObject *op = PyObject_GC_New(__pyx_CyFunctionObject, type); if (op == NULL) return NULL; op->flags = flags; __Pyx_CyFunction_weakreflist(op) = NULL; op->func.m_ml = ml; op->func.m_self = (PyObject *) op; Py_XINCREF(closure); op->func_closure = closure; Py_XINCREF(module); op->func.m_module = module; op->func_dict = NULL; op->func_name = NULL; Py_INCREF(qualname); op->func_qualname = qualname; op->func_doc = NULL; op->func_classobj = NULL; op->func_globals = globals; Py_INCREF(op->func_globals); Py_XINCREF(code); op->func_code = code; op->defaults_pyobjects = 0; op->defaults = NULL; op->defaults_tuple = NULL; op->defaults_kwdict = NULL; op->defaults_getter = NULL; op->func_annotations = NULL; PyObject_GC_Track(op); return (PyObject *) op; } static int __Pyx_CyFunction_clear(__pyx_CyFunctionObject *m) { Py_CLEAR(m->func_closure); Py_CLEAR(m->func.m_module); Py_CLEAR(m->func_dict); Py_CLEAR(m->func_name); 
    Py_CLEAR(m->func_qualname);
    Py_CLEAR(m->func_doc);
    Py_CLEAR(m->func_globals);
    Py_CLEAR(m->func_code);
    Py_CLEAR(m->func_classobj);
    Py_CLEAR(m->defaults_tuple);
    Py_CLEAR(m->defaults_kwdict);
    Py_CLEAR(m->func_annotations);
    if (m->defaults) {
        PyObject **pydefaults = __Pyx_CyFunction_Defaults(PyObject *, m);
        int i;
        for (i = 0; i < m->defaults_pyobjects; i++)
            Py_XDECREF(pydefaults[i]);
        PyObject_Free(m->defaults);
        m->defaults = NULL;
    }
    return 0;
}
static void __Pyx__CyFunction_dealloc(__pyx_CyFunctionObject *m) {
    if (__Pyx_CyFunction_weakreflist(m) != NULL)
        PyObject_ClearWeakRefs((PyObject *) m);
    __Pyx_CyFunction_clear(m);
    PyObject_GC_Del(m);
}
static void __Pyx_CyFunction_dealloc(__pyx_CyFunctionObject *m) {
    PyObject_GC_UnTrack(m);
    __Pyx__CyFunction_dealloc(m);
}
static int __Pyx_CyFunction_traverse(__pyx_CyFunctionObject *m, visitproc visit, void *arg) {
    Py_VISIT(m->func_closure);
    Py_VISIT(m->func.m_module);
    Py_VISIT(m->func_dict);
    Py_VISIT(m->func_name);
    Py_VISIT(m->func_qualname);
    Py_VISIT(m->func_doc);
    Py_VISIT(m->func_globals);
    Py_VISIT(m->func_code);
    Py_VISIT(m->func_classobj);
    Py_VISIT(m->defaults_tuple);
    Py_VISIT(m->defaults_kwdict);
    if (m->defaults) {
        PyObject **pydefaults = __Pyx_CyFunction_Defaults(PyObject *, m);
        int i;
        for (i = 0; i < m->defaults_pyobjects; i++)
            Py_VISIT(pydefaults[i]);
    }
    return 0;
}
static PyObject *__Pyx_CyFunction_descr_get(PyObject *func, PyObject *obj, PyObject *type) {
    __pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func;
    if (m->flags & __Pyx_CYFUNCTION_STATICMETHOD) {
        Py_INCREF(func);
        return func;
    }
    if (m->flags & __Pyx_CYFUNCTION_CLASSMETHOD) {
        if (type == NULL)
            type = (PyObject *)(Py_TYPE(obj));
        return __Pyx_PyMethod_New(func, type, (PyObject *)(Py_TYPE(type)));
    }
    if (obj == Py_None)
        obj = NULL;
    return __Pyx_PyMethod_New(func, obj, type);
}
static PyObject* __Pyx_CyFunction_repr(__pyx_CyFunctionObject *op) {
#if PY_MAJOR_VERSION >= 3
    return PyUnicode_FromFormat("<cyfunction %U at %p>",
                                op->func_qualname, (void *)op);
#else
    return PyString_FromFormat("<cyfunction %s at %p>",
                               PyString_AsString(op->func_qualname), (void *)op);
#endif
}
static PyObject * __Pyx_CyFunction_CallMethod(PyObject *func, PyObject *self, PyObject *arg, PyObject *kw) {
    PyCFunctionObject* f = (PyCFunctionObject*)func;
    PyCFunction meth = f->m_ml->ml_meth;
    Py_ssize_t size;
    switch (f->m_ml->ml_flags & (METH_VARARGS | METH_KEYWORDS | METH_NOARGS | METH_O)) {
    case METH_VARARGS:
        if (likely(kw == NULL || PyDict_Size(kw) == 0))
            return (*meth)(self, arg);
        break;
    case METH_VARARGS | METH_KEYWORDS:
        return (*(PyCFunctionWithKeywords)meth)(self, arg, kw);
    case METH_NOARGS:
        if (likely(kw == NULL || PyDict_Size(kw) == 0)) {
            size = PyTuple_GET_SIZE(arg);
            if (likely(size == 0))
                return (*meth)(self, NULL);
            PyErr_Format(PyExc_TypeError,
                "%.200s() takes no arguments (%" CYTHON_FORMAT_SSIZE_T "d given)",
                f->m_ml->ml_name, size);
            return NULL;
        }
        break;
    case METH_O:
        if (likely(kw == NULL || PyDict_Size(kw) == 0)) {
            size = PyTuple_GET_SIZE(arg);
            if (likely(size == 1)) {
                PyObject *result, *arg0;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
                arg0 = PyTuple_GET_ITEM(arg, 0);
#else
                arg0 = PySequence_ITEM(arg, 0);
                if (unlikely(!arg0)) return NULL;
#endif
                result = (*meth)(self, arg0);
#if !(CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS)
                Py_DECREF(arg0);
#endif
                return result;
            }
            PyErr_Format(PyExc_TypeError,
                "%.200s() takes exactly one argument (%" CYTHON_FORMAT_SSIZE_T "d given)",
                f->m_ml->ml_name, size);
            return NULL;
        }
        break;
    default:
        PyErr_SetString(PyExc_SystemError, "Bad call flags in "
                        "__Pyx_CyFunction_Call. METH_OLDARGS is no "
                        "longer supported!");
        return NULL;
    }
    PyErr_Format(PyExc_TypeError, "%.200s() takes no keyword arguments",
                 f->m_ml->ml_name);
    return NULL;
}
static CYTHON_INLINE PyObject *__Pyx_CyFunction_Call(PyObject *func, PyObject *arg, PyObject *kw) {
    return __Pyx_CyFunction_CallMethod(func, ((PyCFunctionObject*)func)->m_self, arg, kw);
}
static PyObject *__Pyx_CyFunction_CallAsMethod(PyObject *func, PyObject *args, PyObject *kw) {
    PyObject *result;
    __pyx_CyFunctionObject *cyfunc = (__pyx_CyFunctionObject *) func;
    if ((cyfunc->flags & __Pyx_CYFUNCTION_CCLASS) && !(cyfunc->flags & __Pyx_CYFUNCTION_STATICMETHOD)) {
        Py_ssize_t argc;
        PyObject *new_args;
        PyObject *self;
        argc = PyTuple_GET_SIZE(args);
        new_args = PyTuple_GetSlice(args, 1, argc);
        if (unlikely(!new_args))
            return NULL;
        self = PyTuple_GetItem(args, 0);
        if (unlikely(!self)) {
            Py_DECREF(new_args);
            return NULL;
        }
        result = __Pyx_CyFunction_CallMethod(func, self, new_args, kw);
        Py_DECREF(new_args);
    } else {
        result = __Pyx_CyFunction_Call(func, args, kw);
    }
    return result;
}
static PyTypeObject __pyx_CyFunctionType_type = {
    PyVarObject_HEAD_INIT(0, 0)
    "cython_function_or_method",
    sizeof(__pyx_CyFunctionObject),
    0,
    (destructor) __Pyx_CyFunction_dealloc,
    0,
    0,
    0,
#if PY_MAJOR_VERSION < 3
    0,
#else
    0,
#endif
    (reprfunc) __Pyx_CyFunction_repr,
    0,
    0,
    0,
    0,
    __Pyx_CyFunction_CallAsMethod,
    0,
    0,
    0,
    0,
    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC,
    0,
    (traverseproc) __Pyx_CyFunction_traverse,
    (inquiry) __Pyx_CyFunction_clear,
    0,
#if PY_VERSION_HEX < 0x030500A0
    offsetof(__pyx_CyFunctionObject, func_weakreflist),
#else
    offsetof(PyCFunctionObject, m_weakreflist),
#endif
    0,
    0,
    __pyx_CyFunction_methods,
    __pyx_CyFunction_members,
    __pyx_CyFunction_getsets,
    0,
    0,
    __Pyx_CyFunction_descr_get,
    0,
    offsetof(__pyx_CyFunctionObject, func_dict),
    0,
    0,
    0,
    0,
    0,
    0,
    0,
    0,
    0,
    0,
    0,
    0,
#if PY_VERSION_HEX >= 0x030400a1
    0,
#endif
};
static int __pyx_CyFunction_init(void) {
    __pyx_CyFunctionType = __Pyx_FetchCommonType(&__pyx_CyFunctionType_type);
    if (unlikely(__pyx_CyFunctionType == NULL)) {
        return -1;
    }
    return 0;
}
static CYTHON_INLINE void *__Pyx_CyFunction_InitDefaults(PyObject *func, size_t size, int pyobjects) {
    __pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func;
    m->defaults = PyObject_Malloc(size);
    if (unlikely(!m->defaults))
        return PyErr_NoMemory();
    memset(m->defaults, 0, size);
    m->defaults_pyobjects = pyobjects;
    return m->defaults;
}
static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsTuple(PyObject *func, PyObject *tuple) {
    __pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func;
    m->defaults_tuple = tuple;
    Py_INCREF(tuple);
}
static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsKwDict(PyObject *func, PyObject *dict) {
    __pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func;
    m->defaults_kwdict = dict;
    Py_INCREF(dict);
}
static CYTHON_INLINE void __Pyx_CyFunction_SetAnnotationsDict(PyObject *func, PyObject *dict) {
    __pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func;
    m->func_annotations = dict;
    Py_INCREF(dict);
}

/* CalculateMetaclass */
static PyObject *__Pyx_CalculateMetaclass(PyTypeObject *metaclass, PyObject *bases) {
    Py_ssize_t i, nbases = PyTuple_GET_SIZE(bases);
    for (i=0; i < nbases; i++) {
        PyTypeObject *tmptype;
        PyObject *tmp = PyTuple_GET_ITEM(bases, i);
        tmptype = Py_TYPE(tmp);
#if PY_MAJOR_VERSION < 3
        if (tmptype == &PyClass_Type)
            continue;
#endif
        if (!metaclass) {
            metaclass = tmptype;
            continue;
        }
        if (PyType_IsSubtype(metaclass, tmptype))
            continue;
        if (PyType_IsSubtype(tmptype, metaclass)) {
metaclass = tmptype; continue; } PyErr_SetString(PyExc_TypeError, "metaclass conflict: " "the metaclass of a derived class " "must be a (non-strict) subclass " "of the metaclasses of all its bases"); return NULL; } if (!metaclass) { #if PY_MAJOR_VERSION < 3 metaclass = &PyClass_Type; #else metaclass = &PyType_Type; #endif } Py_INCREF((PyObject*) metaclass); return (PyObject*) metaclass; } /* Py3ClassCreate */ static PyObject *__Pyx_Py3MetaclassPrepare(PyObject *metaclass, PyObject *bases, PyObject *name, PyObject *qualname, PyObject *mkw, PyObject *modname, PyObject *doc) { PyObject *ns; if (metaclass) { PyObject *prep = __Pyx_PyObject_GetAttrStr(metaclass, __pyx_n_s_prepare); if (prep) { PyObject *pargs = PyTuple_Pack(2, name, bases); if (unlikely(!pargs)) { Py_DECREF(prep); return NULL; } ns = PyObject_Call(prep, pargs, mkw); Py_DECREF(prep); Py_DECREF(pargs); } else { if (unlikely(!PyErr_ExceptionMatches(PyExc_AttributeError))) return NULL; PyErr_Clear(); ns = PyDict_New(); } } else { ns = PyDict_New(); } if (unlikely(!ns)) return NULL; if (unlikely(PyObject_SetItem(ns, __pyx_n_s_module, modname) < 0)) goto bad; if (unlikely(PyObject_SetItem(ns, __pyx_n_s_qualname, qualname) < 0)) goto bad; if (unlikely(doc && PyObject_SetItem(ns, __pyx_n_s_doc, doc) < 0)) goto bad; return ns; bad: Py_DECREF(ns); return NULL; } static PyObject *__Pyx_Py3ClassCreate(PyObject *metaclass, PyObject *name, PyObject *bases, PyObject *dict, PyObject *mkw, int calculate_metaclass, int allow_py2_metaclass) { PyObject *result, *margs; PyObject *owned_metaclass = NULL; if (allow_py2_metaclass) { owned_metaclass = PyObject_GetItem(dict, __pyx_n_s_metaclass); if (owned_metaclass) { metaclass = owned_metaclass; } else if (likely(PyErr_ExceptionMatches(PyExc_KeyError))) { PyErr_Clear(); } else { return NULL; } } if (calculate_metaclass && (!metaclass || PyType_Check(metaclass))) { metaclass = __Pyx_CalculateMetaclass((PyTypeObject*) metaclass, bases); Py_XDECREF(owned_metaclass); if (unlikely(!metaclass)) return NULL; owned_metaclass = metaclass; } margs = PyTuple_Pack(3, name, bases, dict); if (unlikely(!margs)) { result = NULL; } else { result = PyObject_Call(metaclass, margs, mkw); Py_DECREF(margs); } Py_XDECREF(owned_metaclass); return result; } /* CLineInTraceback */ #ifndef CYTHON_CLINE_IN_TRACEBACK static int __Pyx_CLineForTraceback(CYTHON_UNUSED PyThreadState *tstate, int c_line) { PyObject *use_cline; PyObject *ptype, *pvalue, *ptraceback; #if CYTHON_COMPILING_IN_CPYTHON PyObject **cython_runtime_dict; #endif if (unlikely(!__pyx_cython_runtime)) { return c_line; } __Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback); #if CYTHON_COMPILING_IN_CPYTHON cython_runtime_dict = _PyObject_GetDictPtr(__pyx_cython_runtime); if (likely(cython_runtime_dict)) { use_cline = __Pyx_PyDict_GetItemStr(*cython_runtime_dict, __pyx_n_s_cline_in_traceback); } else #endif { PyObject *use_cline_obj = __Pyx_PyObject_GetAttrStr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback); if (use_cline_obj) { use_cline = PyObject_Not(use_cline_obj) ? 
Py_False : Py_True; Py_DECREF(use_cline_obj); } else { PyErr_Clear(); use_cline = NULL; } } if (!use_cline) { c_line = 0; PyObject_SetAttr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback, Py_False); } else if (PyObject_Not(use_cline) != 0) { c_line = 0; } __Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback); return c_line; } #endif /* CodeObjectCache */ static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) { int start = 0, mid = 0, end = count - 1; if (end >= 0 && code_line > entries[end].code_line) { return count; } while (start < end) { mid = start + (end - start) / 2; if (code_line < entries[mid].code_line) { end = mid; } else if (code_line > entries[mid].code_line) { start = mid + 1; } else { return mid; } } if (code_line <= entries[mid].code_line) { return mid; } else { return mid + 1; } } static PyCodeObject *__pyx_find_code_object(int code_line) { PyCodeObject* code_object; int pos; if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) { return NULL; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) { return NULL; } code_object = __pyx_code_cache.entries[pos].code_object; Py_INCREF(code_object); return code_object; } static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) { int pos, i; __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries; if (unlikely(!code_line)) { return; } if (unlikely(!entries)) { entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry)); if (likely(entries)) { __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = 64; __pyx_code_cache.count = 1; entries[0].code_line = code_line; entries[0].code_object = code_object; Py_INCREF(code_object); } return; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) { PyCodeObject* tmp = entries[pos].code_object; entries[pos].code_object = code_object; Py_DECREF(tmp); return; } if (__pyx_code_cache.count == __pyx_code_cache.max_count) { int new_max = __pyx_code_cache.max_count + 64; entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc( __pyx_code_cache.entries, (size_t)new_max*sizeof(__Pyx_CodeObjectCacheEntry)); if (unlikely(!entries)) { return; } __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = new_max; } for (i=__pyx_code_cache.count; i>pos; i--) { entries[i] = entries[i-1]; } entries[pos].code_line = code_line; entries[pos].code_object = code_object; __pyx_code_cache.count++; Py_INCREF(code_object); } /* AddTraceback */ #include "compile.h" #include "frameobject.h" #include "traceback.h" static PyCodeObject* __Pyx_CreateCodeObjectForTraceback( const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyObject *py_srcfile = 0; PyObject *py_funcname = 0; #if PY_MAJOR_VERSION < 3 py_srcfile = PyString_FromString(filename); #else py_srcfile = PyUnicode_FromString(filename); #endif if (!py_srcfile) goto bad; if (c_line) { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #else py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #endif } else { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromString(funcname); #else py_funcname = 
PyUnicode_FromString(funcname); #endif } if (!py_funcname) goto bad; py_code = __Pyx_PyCode_New( 0, 0, 0, 0, 0, __pyx_empty_bytes, /*PyObject *code,*/ __pyx_empty_tuple, /*PyObject *consts,*/ __pyx_empty_tuple, /*PyObject *names,*/ __pyx_empty_tuple, /*PyObject *varnames,*/ __pyx_empty_tuple, /*PyObject *freevars,*/ __pyx_empty_tuple, /*PyObject *cellvars,*/ py_srcfile, /*PyObject *filename,*/ py_funcname, /*PyObject *name,*/ py_line, __pyx_empty_bytes /*PyObject *lnotab*/ ); Py_DECREF(py_srcfile); Py_DECREF(py_funcname); return py_code; bad: Py_XDECREF(py_srcfile); Py_XDECREF(py_funcname); return NULL; } static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyFrameObject *py_frame = 0; PyThreadState *tstate = __Pyx_PyThreadState_Current; if (c_line) { c_line = __Pyx_CLineForTraceback(tstate, c_line); } py_code = __pyx_find_code_object(c_line ? -c_line : py_line); if (!py_code) { py_code = __Pyx_CreateCodeObjectForTraceback( funcname, c_line, py_line, filename); if (!py_code) goto bad; __pyx_insert_code_object(c_line ? -c_line : py_line, py_code); } py_frame = PyFrame_New( tstate, /*PyThreadState *tstate,*/ py_code, /*PyCodeObject *code,*/ __pyx_d, /*PyObject *globals,*/ 0 /*PyObject *locals*/ ); if (!py_frame) goto bad; __Pyx_PyFrame_SetLineNumber(py_frame, py_line); PyTraceBack_Here(py_frame); bad: Py_XDECREF(py_code); Py_XDECREF(py_frame); } #if PY_MAJOR_VERSION < 3 static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) { if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags); if (__Pyx_TypeCheck(obj, __pyx_array_type)) return __pyx_array_getbuffer(obj, view, flags); if (__Pyx_TypeCheck(obj, __pyx_memoryview_type)) return __pyx_memoryview_getbuffer(obj, view, flags); PyErr_Format(PyExc_TypeError, "'%.200s' does not have the buffer interface", Py_TYPE(obj)->tp_name); return -1; } static void __Pyx_ReleaseBuffer(Py_buffer *view) { PyObject *obj = view->obj; if (!obj) return; if (PyObject_CheckBuffer(obj)) { PyBuffer_Release(view); return; } if ((0)) {} view->obj = NULL; Py_DECREF(obj); } #endif /* MemviewSliceIsContig */ static int __pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs, char order, int ndim) { int i, index, step, start; Py_ssize_t itemsize = mvs.memview->view.itemsize; if (order == 'F') { step = 1; start = 0; } else { step = -1; start = ndim - 1; } for (i = 0; i < ndim; i++) { index = start + step * i; if (mvs.suboffsets[index] >= 0 || mvs.strides[index] != itemsize) return 0; itemsize *= mvs.shape[index]; } return 1; } /* OverlappingSlices */ static void __pyx_get_array_memory_extents(__Pyx_memviewslice *slice, void **out_start, void **out_end, int ndim, size_t itemsize) { char *start, *end; int i; start = end = slice->data; for (i = 0; i < ndim; i++) { Py_ssize_t stride = slice->strides[i]; Py_ssize_t extent = slice->shape[i]; if (extent == 0) { *out_start = *out_end = start; return; } else { if (stride > 0) end += stride * (extent - 1); else start += stride * (extent - 1); } } *out_start = start; *out_end = end + itemsize; } static int __pyx_slices_overlap(__Pyx_memviewslice *slice1, __Pyx_memviewslice *slice2, int ndim, size_t itemsize) { void *start1, *end1, *start2, *end2; __pyx_get_array_memory_extents(slice1, &start1, &end1, ndim, itemsize); __pyx_get_array_memory_extents(slice2, &start2, &end2, ndim, itemsize); return (start1 < end2) && (start2 < end1); } /* Capsule */ static CYTHON_INLINE PyObject * __pyx_capsule_create(void *p, CYTHON_UNUSED const 
char *sig) { PyObject *cobj; #if PY_VERSION_HEX >= 0x02070000 cobj = PyCapsule_New(p, sig, NULL); #else cobj = PyCObject_FromVoidPtr(p, NULL); #endif return cobj; } /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_uint64_t(uint64_t value) { const uint64_t neg_one = (uint64_t) -1, const_zero = (uint64_t) 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(uint64_t) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(uint64_t) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(uint64_t) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); #endif } } else { if (sizeof(uint64_t) <= sizeof(long)) { return PyInt_FromLong((long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(uint64_t) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); #endif } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(uint64_t), little, !is_unsigned); } } /* CIntFromPyVerify */ #define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0) #define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1) #define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\ {\ func_type value = func_value;\ if (sizeof(target_type) < sizeof(func_type)) {\ if (unlikely(value != (func_type) (target_type) value)) {\ func_type zero = 0;\ if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\ return (target_type) -1;\ if (is_unsigned && unlikely(value < zero))\ goto raise_neg_overflow;\ else\ goto raise_overflow;\ }\ }\ return (target_type) value;\ } /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_uint32_t(uint32_t value) { const uint32_t neg_one = (uint32_t) -1, const_zero = (uint32_t) 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(uint32_t) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(uint32_t) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(uint32_t) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); #endif } } else { if (sizeof(uint32_t) <= sizeof(long)) { return PyInt_FromLong((long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(uint32_t) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); #endif } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(uint32_t), little, !is_unsigned); } } /* MemviewSliceCopyTemplate */ static __Pyx_memviewslice __pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs, const char *mode, int ndim, size_t sizeof_dtype, int contig_flag, int dtype_is_object) { __Pyx_RefNannyDeclarations int i; __Pyx_memviewslice new_mvs = { 0, 0, { 0 }, { 0 }, { 0 } }; struct __pyx_memoryview_obj *from_memview = from_mvs->memview; Py_buffer *buf = &from_memview->view; PyObject *shape_tuple = NULL; PyObject *temp_int = NULL; struct __pyx_array_obj *array_obj = NULL; struct __pyx_memoryview_obj *memview_obj = NULL; __Pyx_RefNannySetupContext("__pyx_memoryview_copy_new_contig", 0); for (i = 0; i < ndim; i++) { if 
(from_mvs->suboffsets[i] >= 0) { PyErr_Format(PyExc_ValueError, "Cannot copy memoryview slice with " "indirect dimensions (axis %d)", i); goto fail; } } shape_tuple = PyTuple_New(ndim); if (unlikely(!shape_tuple)) { goto fail; } __Pyx_GOTREF(shape_tuple); for(i = 0; i < ndim; i++) { temp_int = PyInt_FromSsize_t(from_mvs->shape[i]); if(unlikely(!temp_int)) { goto fail; } else { PyTuple_SET_ITEM(shape_tuple, i, temp_int); temp_int = NULL; } } array_obj = __pyx_array_new(shape_tuple, sizeof_dtype, buf->format, (char *) mode, NULL); if (unlikely(!array_obj)) { goto fail; } __Pyx_GOTREF(array_obj); memview_obj = (struct __pyx_memoryview_obj *) __pyx_memoryview_new( (PyObject *) array_obj, contig_flag, dtype_is_object, from_mvs->memview->typeinfo); if (unlikely(!memview_obj)) goto fail; if (unlikely(__Pyx_init_memviewslice(memview_obj, ndim, &new_mvs, 1) < 0)) goto fail; if (unlikely(__pyx_memoryview_copy_contents(*from_mvs, new_mvs, ndim, ndim, dtype_is_object) < 0)) goto fail; goto no_fail; fail: __Pyx_XDECREF(new_mvs.memview); new_mvs.memview = NULL; new_mvs.data = NULL; no_fail: __Pyx_XDECREF(shape_tuple); __Pyx_XDECREF(temp_int); __Pyx_XDECREF(array_obj); __Pyx_RefNannyFinishContext(); return new_mvs; } /* CIntFromPy */ static CYTHON_INLINE uint64_t __Pyx_PyInt_As_uint64_t(PyObject *x) { const uint64_t neg_one = (uint64_t) -1, const_zero = (uint64_t) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(uint64_t) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(uint64_t, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (uint64_t) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (uint64_t) 0; case 1: __PYX_VERIFY_RETURN_INT(uint64_t, digit, digits[0]) case 2: if (8 * sizeof(uint64_t) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(uint64_t, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(uint64_t) >= 2 * PyLong_SHIFT) { return (uint64_t) (((((uint64_t)digits[1]) << PyLong_SHIFT) | (uint64_t)digits[0])); } } break; case 3: if (8 * sizeof(uint64_t) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(uint64_t, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(uint64_t) >= 3 * PyLong_SHIFT) { return (uint64_t) (((((((uint64_t)digits[2]) << PyLong_SHIFT) | (uint64_t)digits[1]) << PyLong_SHIFT) | (uint64_t)digits[0])); } } break; case 4: if (8 * sizeof(uint64_t) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(uint64_t, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(uint64_t) >= 4 * PyLong_SHIFT) { return (uint64_t) (((((((((uint64_t)digits[3]) << PyLong_SHIFT) | (uint64_t)digits[2]) << PyLong_SHIFT) | (uint64_t)digits[1]) << PyLong_SHIFT) | (uint64_t)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if 
(unlikely(result < 0)) return (uint64_t) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(uint64_t) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(uint64_t, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(uint64_t) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(uint64_t, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (uint64_t) 0; case -1: __PYX_VERIFY_RETURN_INT(uint64_t, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(uint64_t, digit, +digits[0]) case -2: if (8 * sizeof(uint64_t) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(uint64_t, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(uint64_t) - 1 > 2 * PyLong_SHIFT) { return (uint64_t) (((uint64_t)-1)*(((((uint64_t)digits[1]) << PyLong_SHIFT) | (uint64_t)digits[0]))); } } break; case 2: if (8 * sizeof(uint64_t) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(uint64_t, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(uint64_t) - 1 > 2 * PyLong_SHIFT) { return (uint64_t) ((((((uint64_t)digits[1]) << PyLong_SHIFT) | (uint64_t)digits[0]))); } } break; case -3: if (8 * sizeof(uint64_t) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(uint64_t, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(uint64_t) - 1 > 3 * PyLong_SHIFT) { return (uint64_t) (((uint64_t)-1)*(((((((uint64_t)digits[2]) << PyLong_SHIFT) | (uint64_t)digits[1]) << PyLong_SHIFT) | (uint64_t)digits[0]))); } } break; case 3: if (8 * sizeof(uint64_t) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(uint64_t, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(uint64_t) - 1 > 3 * PyLong_SHIFT) { return (uint64_t) ((((((((uint64_t)digits[2]) << PyLong_SHIFT) | (uint64_t)digits[1]) << PyLong_SHIFT) | (uint64_t)digits[0]))); } } break; case -4: if (8 * sizeof(uint64_t) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(uint64_t, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(uint64_t) - 1 > 4 * PyLong_SHIFT) { return (uint64_t) (((uint64_t)-1)*(((((((((uint64_t)digits[3]) << PyLong_SHIFT) | (uint64_t)digits[2]) << PyLong_SHIFT) | (uint64_t)digits[1]) << PyLong_SHIFT) | (uint64_t)digits[0]))); } } break; case 4: if (8 * sizeof(uint64_t) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(uint64_t, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(uint64_t) - 1 > 4 * PyLong_SHIFT) { return (uint64_t) ((((((((((uint64_t)digits[3]) << PyLong_SHIFT) | (uint64_t)digits[2]) << PyLong_SHIFT) 
| (uint64_t)digits[1]) << PyLong_SHIFT) | (uint64_t)digits[0]))); } } break; } #endif if (sizeof(uint64_t) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(uint64_t, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(uint64_t) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(uint64_t, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else uint64_t val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (uint64_t) -1; } } else { uint64_t val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (uint64_t) -1; val = __Pyx_PyInt_As_uint64_t(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to uint64_t"); return (uint64_t) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to uint64_t"); return (uint64_t) -1; } /* CIntFromPy */ static CYTHON_INLINE uint8_t __Pyx_PyInt_As_uint8_t(PyObject *x) { const uint8_t neg_one = (uint8_t) -1, const_zero = (uint8_t) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(uint8_t) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(uint8_t, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (uint8_t) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (uint8_t) 0; case 1: __PYX_VERIFY_RETURN_INT(uint8_t, digit, digits[0]) case 2: if (8 * sizeof(uint8_t) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(uint8_t, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(uint8_t) >= 2 * PyLong_SHIFT) { return (uint8_t) (((((uint8_t)digits[1]) << PyLong_SHIFT) | (uint8_t)digits[0])); } } break; case 3: if (8 * sizeof(uint8_t) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(uint8_t, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(uint8_t) >= 3 * PyLong_SHIFT) { return (uint8_t) (((((((uint8_t)digits[2]) << PyLong_SHIFT) | (uint8_t)digits[1]) << PyLong_SHIFT) | (uint8_t)digits[0])); } } break; case 4: if (8 * sizeof(uint8_t) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(uint8_t, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(uint8_t) >= 4 * PyLong_SHIFT) { return (uint8_t) (((((((((uint8_t)digits[3]) << PyLong_SHIFT) | (uint8_t)digits[2]) << PyLong_SHIFT) | (uint8_t)digits[1]) << PyLong_SHIFT) | (uint8_t)digits[0])); } } break; } 
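            /* Descriptive note on the digit-level fast path above: CPython
               stores ints as little-endian arrays of PyLong_SHIFT-bit digits
               with the sign carried by Py_SIZE(x); e.g. with 30-bit digits
               the value 2**31 is the digit pair {0, 2} and is rebuilt as
               ((2 << 30) | 0).  Values with more digits than the switch
               handles fall through to the generic PyLong_AsUnsignedLong and
               _PyLong_AsByteArray conversions below. */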
#endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (uint8_t) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(uint8_t) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(uint8_t, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(uint8_t) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(uint8_t, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (uint8_t) 0; case -1: __PYX_VERIFY_RETURN_INT(uint8_t, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(uint8_t, digit, +digits[0]) case -2: if (8 * sizeof(uint8_t) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(uint8_t, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(uint8_t) - 1 > 2 * PyLong_SHIFT) { return (uint8_t) (((uint8_t)-1)*(((((uint8_t)digits[1]) << PyLong_SHIFT) | (uint8_t)digits[0]))); } } break; case 2: if (8 * sizeof(uint8_t) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(uint8_t, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(uint8_t) - 1 > 2 * PyLong_SHIFT) { return (uint8_t) ((((((uint8_t)digits[1]) << PyLong_SHIFT) | (uint8_t)digits[0]))); } } break; case -3: if (8 * sizeof(uint8_t) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(uint8_t, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(uint8_t) - 1 > 3 * PyLong_SHIFT) { return (uint8_t) (((uint8_t)-1)*(((((((uint8_t)digits[2]) << PyLong_SHIFT) | (uint8_t)digits[1]) << PyLong_SHIFT) | (uint8_t)digits[0]))); } } break; case 3: if (8 * sizeof(uint8_t) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(uint8_t, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(uint8_t) - 1 > 3 * PyLong_SHIFT) { return (uint8_t) ((((((((uint8_t)digits[2]) << PyLong_SHIFT) | (uint8_t)digits[1]) << PyLong_SHIFT) | (uint8_t)digits[0]))); } } break; case -4: if (8 * sizeof(uint8_t) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(uint8_t, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(uint8_t) - 1 > 4 * PyLong_SHIFT) { return (uint8_t) (((uint8_t)-1)*(((((((((uint8_t)digits[3]) << PyLong_SHIFT) | (uint8_t)digits[2]) << PyLong_SHIFT) | (uint8_t)digits[1]) << PyLong_SHIFT) | (uint8_t)digits[0]))); } } break; case 4: if (8 * sizeof(uint8_t) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(uint8_t, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(uint8_t) - 1 > 4 * 
PyLong_SHIFT) { return (uint8_t) ((((((((((uint8_t)digits[3]) << PyLong_SHIFT) | (uint8_t)digits[2]) << PyLong_SHIFT) | (uint8_t)digits[1]) << PyLong_SHIFT) | (uint8_t)digits[0]))); } } break; } #endif if (sizeof(uint8_t) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(uint8_t, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(uint8_t) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(uint8_t, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else uint8_t val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (uint8_t) -1; } } else { uint8_t val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (uint8_t) -1; val = __Pyx_PyInt_As_uint8_t(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to uint8_t"); return (uint8_t) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to uint8_t"); return (uint8_t) -1; } /* CIntFromPy */ static CYTHON_INLINE uint32_t __Pyx_PyInt_As_uint32_t(PyObject *x) { const uint32_t neg_one = (uint32_t) -1, const_zero = (uint32_t) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(uint32_t) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(uint32_t, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (uint32_t) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (uint32_t) 0; case 1: __PYX_VERIFY_RETURN_INT(uint32_t, digit, digits[0]) case 2: if (8 * sizeof(uint32_t) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(uint32_t, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(uint32_t) >= 2 * PyLong_SHIFT) { return (uint32_t) (((((uint32_t)digits[1]) << PyLong_SHIFT) | (uint32_t)digits[0])); } } break; case 3: if (8 * sizeof(uint32_t) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(uint32_t, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(uint32_t) >= 3 * PyLong_SHIFT) { return (uint32_t) (((((((uint32_t)digits[2]) << PyLong_SHIFT) | (uint32_t)digits[1]) << PyLong_SHIFT) | (uint32_t)digits[0])); } } break; case 4: if (8 * sizeof(uint32_t) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(uint32_t, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(uint32_t) >= 4 * PyLong_SHIFT) { return (uint32_t) (((((((((uint32_t)digits[3]) 
<< PyLong_SHIFT) | (uint32_t)digits[2]) << PyLong_SHIFT) | (uint32_t)digits[1]) << PyLong_SHIFT) | (uint32_t)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (uint32_t) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(uint32_t) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(uint32_t, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(uint32_t) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(uint32_t, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (uint32_t) 0; case -1: __PYX_VERIFY_RETURN_INT(uint32_t, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(uint32_t, digit, +digits[0]) case -2: if (8 * sizeof(uint32_t) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(uint32_t, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(uint32_t) - 1 > 2 * PyLong_SHIFT) { return (uint32_t) (((uint32_t)-1)*(((((uint32_t)digits[1]) << PyLong_SHIFT) | (uint32_t)digits[0]))); } } break; case 2: if (8 * sizeof(uint32_t) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(uint32_t, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(uint32_t) - 1 > 2 * PyLong_SHIFT) { return (uint32_t) ((((((uint32_t)digits[1]) << PyLong_SHIFT) | (uint32_t)digits[0]))); } } break; case -3: if (8 * sizeof(uint32_t) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(uint32_t, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(uint32_t) - 1 > 3 * PyLong_SHIFT) { return (uint32_t) (((uint32_t)-1)*(((((((uint32_t)digits[2]) << PyLong_SHIFT) | (uint32_t)digits[1]) << PyLong_SHIFT) | (uint32_t)digits[0]))); } } break; case 3: if (8 * sizeof(uint32_t) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(uint32_t, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(uint32_t) - 1 > 3 * PyLong_SHIFT) { return (uint32_t) ((((((((uint32_t)digits[2]) << PyLong_SHIFT) | (uint32_t)digits[1]) << PyLong_SHIFT) | (uint32_t)digits[0]))); } } break; case -4: if (8 * sizeof(uint32_t) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(uint32_t, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(uint32_t) - 1 > 4 * PyLong_SHIFT) { return (uint32_t) (((uint32_t)-1)*(((((((((uint32_t)digits[3]) << PyLong_SHIFT) | (uint32_t)digits[2]) << PyLong_SHIFT) | (uint32_t)digits[1]) << PyLong_SHIFT) | (uint32_t)digits[0]))); } } break; case 4: if (8 * sizeof(uint32_t) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(uint32_t, unsigned long, (((((((((unsigned 
long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(uint32_t) - 1 > 4 * PyLong_SHIFT) { return (uint32_t) ((((((((((uint32_t)digits[3]) << PyLong_SHIFT) | (uint32_t)digits[2]) << PyLong_SHIFT) | (uint32_t)digits[1]) << PyLong_SHIFT) | (uint32_t)digits[0]))); } } break; } #endif if (sizeof(uint32_t) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(uint32_t, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(uint32_t) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(uint32_t, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else uint32_t val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (uint32_t) -1; } } else { uint32_t val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (uint32_t) -1; val = __Pyx_PyInt_As_uint32_t(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to uint32_t"); return (uint32_t) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to uint32_t"); return (uint32_t) -1; } /* CIntFromPy */ static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) { const int neg_one = (int) -1, const_zero = (int) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(int) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (int) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (int) 0; case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0]) case 2: if (8 * sizeof(int) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) { return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; case 3: if (8 * sizeof(int) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) { return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; case 4: if (8 * sizeof(int) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * 
sizeof(int) >= 4 * PyLong_SHIFT) { return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (int) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(int) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (int) 0; case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(int, digit, +digits[0]) case -2: if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 2: if (8 * sizeof(int) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case -3: if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 3: if (8 * sizeof(int) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case -4: if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 4: if (8 * sizeof(int) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 4 * 
PyLong_SHIFT) { return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; } #endif if (sizeof(int) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else int val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (int) -1; } } else { int val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (int) -1; val = __Pyx_PyInt_As_int(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to int"); return (int) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to int"); return (int) -1; } /* CIntFromPy */ static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) { const long neg_one = (long) -1, const_zero = (long) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(long) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (long) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (long) 0; case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0]) case 2: if (8 * sizeof(long) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) { return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; case 3: if (8 * sizeof(long) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) { return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; case 4: if (8 * sizeof(long) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) { return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) 
< 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (long) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(long) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (long) 0; case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0]) case -2: if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 2: if (8 * sizeof(long) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case -3: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 3: if (8 * sizeof(long) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case -4: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 4: if (8 * sizeof(long) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; } #endif if (sizeof(long) <= 
sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else long val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (long) -1; } } else { long val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (long) -1; val = __Pyx_PyInt_As_long(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to long"); return (long) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to long"); return (long) -1; } /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) { const int neg_one = (int) -1, const_zero = (int) 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(int) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(int) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); #endif } } else { if (sizeof(int) <= sizeof(long)) { return PyInt_FromLong((long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); #endif } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(int), little, !is_unsigned); } } /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) { const long neg_one = (long) -1, const_zero = (long) 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(long) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(long) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); #endif } } else { if (sizeof(long) <= sizeof(long)) { return PyInt_FromLong((long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); #endif } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(long), little, !is_unsigned); } } /* CIntFromPy */ static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *x) { const char neg_one = (char) -1, const_zero = (char) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(char) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(char, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; 
} return (char) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (char) 0; case 1: __PYX_VERIFY_RETURN_INT(char, digit, digits[0]) case 2: if (8 * sizeof(char) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) >= 2 * PyLong_SHIFT) { return (char) (((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); } } break; case 3: if (8 * sizeof(char) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) >= 3 * PyLong_SHIFT) { return (char) (((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); } } break; case 4: if (8 * sizeof(char) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) >= 4 * PyLong_SHIFT) { return (char) (((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (char) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(char) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(char, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(char) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(char, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (char) 0; case -1: __PYX_VERIFY_RETURN_INT(char, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(char, digit, +digits[0]) case -2: if (8 * sizeof(char) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { return (char) (((char)-1)*(((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case 2: if (8 * sizeof(char) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { return (char) ((((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case -3: if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { return (char) 
(((char)-1)*(((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case 3: if (8 * sizeof(char) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { return (char) ((((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case -4: if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) { return (char) (((char)-1)*(((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case 4: if (8 * sizeof(char) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) { return (char) ((((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; } #endif if (sizeof(char) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(char, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(char) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(char, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else char val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (char) -1; } } else { char val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (char) -1; val = __Pyx_PyInt_As_char(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to char"); return (char) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to char"); return (char) -1; } /* IsLittleEndian */ static CYTHON_INLINE int __Pyx_Is_Little_Endian(void) { union { uint32_t u32; uint8_t u8[4]; } S; S.u32 = 0x01020304; return S.u8[0] == 4; } /* BufferFormatCheck */ static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, __Pyx_BufFmt_StackElem* stack, __Pyx_TypeInfo* type) { stack[0].field = &ctx->root; stack[0].parent_offset = 0; ctx->root.type = type; ctx->root.name = "buffer dtype"; ctx->root.offset = 0; ctx->head = stack; ctx->head->field = &ctx->root; ctx->fmt_offset = 0; ctx->head->parent_offset = 0; ctx->new_packmode = '@'; ctx->enc_packmode = '@'; ctx->new_count = 1; 
    ctx->enc_count = 0;
    ctx->enc_type = 0;
    ctx->is_complex = 0;
    ctx->is_valid_array = 0;
    ctx->struct_alignment = 0;
    while (type->typegroup == 'S') {
        ++ctx->head;
        ctx->head->field = type->fields;
        ctx->head->parent_offset = 0;
        type = type->fields->type;
    }
}
static int __Pyx_BufFmt_ParseNumber(const char** ts) {
    int count;
    const char* t = *ts;
    if (*t < '0' || *t > '9') {
        return -1;
    } else {
        count = *t++ - '0';
        /* was "*t < '9'", which would stop short of the digit '9' and
           misparse any count containing it */
        while (*t >= '0' && *t <= '9') {
            count *= 10;
            count += *t++ - '0';
        }
    }
    *ts = t;
    return count;
}
static int __Pyx_BufFmt_ExpectNumber(const char **ts) {
    int number = __Pyx_BufFmt_ParseNumber(ts);
    if (number == -1)
        PyErr_Format(PyExc_ValueError,\
                     "Does not understand character buffer dtype format string ('%c')", **ts);
    return number;
}
static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) {
    PyErr_Format(PyExc_ValueError,
                 "Unexpected format string character: '%c'", ch);
}
static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) {
    switch (ch) {
    case 'c': return "'char'";
    case 'b': return "'signed char'";
    case 'B': return "'unsigned char'";
    case 'h': return "'short'";
    case 'H': return "'unsigned short'";
    case 'i': return "'int'";
    case 'I': return "'unsigned int'";
    case 'l': return "'long'";
    case 'L': return "'unsigned long'";
    case 'q': return "'long long'";
    case 'Q': return "'unsigned long long'";
    case 'f': return (is_complex ? "'complex float'" : "'float'");
    case 'd': return (is_complex ? "'complex double'" : "'double'");
    case 'g': return (is_complex ? "'complex long double'" : "'long double'");
    case 'T': return "a struct";
    case 'O': return "Python object";
    case 'P': return "a pointer";
    case 's': case 'p': return "a string";
    case 0: return "end";
    default: return "unparseable format string";
    }
}
static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) {
    switch (ch) {
    case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
    case 'h': case 'H': return 2;
    case 'i': case 'I': case 'l': case 'L': return 4;
    case 'q': case 'Q': return 8;
    case 'f': return (is_complex ? 8 : 4);
    case 'd': return (is_complex ? 16 : 8);
    case 'g': {
        PyErr_SetString(PyExc_ValueError,
                        "Python does not define a standard format string size for long double ('g')..");
        return 0;
    }
    case 'O': case 'P': return sizeof(void*);
    default:
        __Pyx_BufFmt_RaiseUnexpectedChar(ch);
        return 0;
    }
}
static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) {
    switch (ch) {
    case 'c': case 'b': case 'B': case 's': case 'p': return 1;
    case 'h': case 'H': return sizeof(short);
    case 'i': case 'I': return sizeof(int);
    case 'l': case 'L': return sizeof(long);
#ifdef HAVE_LONG_LONG
    case 'q': case 'Q': return sizeof(PY_LONG_LONG);
#endif
    case 'f': return sizeof(float) * (is_complex ? 2 : 1);
    case 'd': return sizeof(double) * (is_complex ? 2 : 1);
    case 'g': return sizeof(long double) * (is_complex ?
2 : 1); case 'O': case 'P': return sizeof(void*); default: { __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } } typedef struct { char c; short x; } __Pyx_st_short; typedef struct { char c; int x; } __Pyx_st_int; typedef struct { char c; long x; } __Pyx_st_long; typedef struct { char c; float x; } __Pyx_st_float; typedef struct { char c; double x; } __Pyx_st_double; typedef struct { char c; long double x; } __Pyx_st_longdouble; typedef struct { char c; void *x; } __Pyx_st_void_p; #ifdef HAVE_LONG_LONG typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong; #endif static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, CYTHON_UNUSED int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short); case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int); case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(__Pyx_st_float) - sizeof(float); case 'd': return sizeof(__Pyx_st_double) - sizeof(double); case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double); case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } /* These are for computing the padding at the end of the struct to align on the first member of the struct. This will probably the same as above, but we don't have any guarantees. */ typedef struct { short x; char c; } __Pyx_pad_short; typedef struct { int x; char c; } __Pyx_pad_int; typedef struct { long x; char c; } __Pyx_pad_long; typedef struct { float x; char c; } __Pyx_pad_float; typedef struct { double x; char c; } __Pyx_pad_double; typedef struct { long double x; char c; } __Pyx_pad_longdouble; typedef struct { void *x; char c; } __Pyx_pad_void_p; #ifdef HAVE_LONG_LONG typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong; #endif static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, CYTHON_UNUSED int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(__Pyx_pad_short) - sizeof(short); case 'i': case 'I': return sizeof(__Pyx_pad_int) - sizeof(int); case 'l': case 'L': return sizeof(__Pyx_pad_long) - sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(__Pyx_pad_float) - sizeof(float); case 'd': return sizeof(__Pyx_pad_double) - sizeof(double); case 'g': return sizeof(__Pyx_pad_longdouble) - sizeof(long double); case 'P': case 'O': return sizeof(__Pyx_pad_void_p) - sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) { switch (ch) { case 'c': return 'H'; case 'b': case 'h': case 'i': case 'l': case 'q': case 's': case 'p': return 'I'; case 'B': case 'H': case 'I': case 'L': case 'Q': return 'U'; case 'f': case 'd': case 'g': return (is_complex ? 
'C' : 'R'); case 'O': return 'O'; case 'P': return 'P'; default: { __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } } static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) { if (ctx->head == NULL || ctx->head->field == &ctx->root) { const char* expected; const char* quote; if (ctx->head == NULL) { expected = "end"; quote = ""; } else { expected = ctx->head->field->type->name; quote = "'"; } PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch, expected %s%s%s but got %s", quote, expected, quote, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex)); } else { __Pyx_StructField* field = ctx->head->field; __Pyx_StructField* parent = (ctx->head - 1)->field; PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'", field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex), parent->type->name, field->name); } } static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) { char group; size_t size, offset, arraysize = 1; if (ctx->enc_type == 0) return 0; if (ctx->head->field->type->arraysize[0]) { int i, ndim = 0; if (ctx->enc_type == 's' || ctx->enc_type == 'p') { ctx->is_valid_array = ctx->head->field->type->ndim == 1; ndim = 1; if (ctx->enc_count != ctx->head->field->type->arraysize[0]) { PyErr_Format(PyExc_ValueError, "Expected a dimension of size %zu, got %zu", ctx->head->field->type->arraysize[0], ctx->enc_count); return -1; } } if (!ctx->is_valid_array) { PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d", ctx->head->field->type->ndim, ndim); return -1; } for (i = 0; i < ctx->head->field->type->ndim; i++) { arraysize *= ctx->head->field->type->arraysize[i]; } ctx->is_valid_array = 0; ctx->enc_count = 1; } group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex); do { __Pyx_StructField* field = ctx->head->field; __Pyx_TypeInfo* type = field->type; if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') { size = __Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex); } else { size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex); } if (ctx->enc_packmode == '@') { size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex); size_t align_mod_offset; if (align_at == 0) return -1; align_mod_offset = ctx->fmt_offset % align_at; if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset; if (ctx->struct_alignment == 0) ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type, ctx->is_complex); } if (type->size != size || type->typegroup != group) { if (type->typegroup == 'C' && type->fields != NULL) { size_t parent_offset = ctx->head->parent_offset + field->offset; ++ctx->head; ctx->head->field = type->fields; ctx->head->parent_offset = parent_offset; continue; } if ((type->typegroup == 'H' || group == 'H') && type->size == size) { } else { __Pyx_BufFmt_RaiseExpected(ctx); return -1; } } offset = ctx->head->parent_offset + field->offset; if (ctx->fmt_offset != offset) { PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch; next field is at offset %" CYTHON_FORMAT_SSIZE_T "d but %" CYTHON_FORMAT_SSIZE_T "d expected", (Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset); return -1; } ctx->fmt_offset += size; if (arraysize) ctx->fmt_offset += (arraysize - 1) * size; --ctx->enc_count; while (1) { if (field == &ctx->root) { ctx->head = NULL; if (ctx->enc_count != 0) { __Pyx_BufFmt_RaiseExpected(ctx); return -1; } break; } ctx->head->field = ++field; if (field->type == NULL) { --ctx->head; field = 
ctx->head->field; continue; } else if (field->type->typegroup == 'S') { size_t parent_offset = ctx->head->parent_offset + field->offset; if (field->type->fields->type == NULL) continue; field = field->type->fields; ++ctx->head; ctx->head->field = field; ctx->head->parent_offset = parent_offset; break; } else { break; } } } while (ctx->enc_count); ctx->enc_type = 0; ctx->is_complex = 0; return 0; } static PyObject * __pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp) { const char *ts = *tsp; int i = 0, number; int ndim = ctx->head->field->type->ndim; ; ++ts; if (ctx->new_count != 1) { PyErr_SetString(PyExc_ValueError, "Cannot handle repeated arrays in format string"); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; while (*ts && *ts != ')') { switch (*ts) { case ' ': case '\f': case '\r': case '\n': case '\t': case '\v': continue; default: break; } number = __Pyx_BufFmt_ExpectNumber(&ts); if (number == -1) return NULL; if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i]) return PyErr_Format(PyExc_ValueError, "Expected a dimension of size %zu, got %d", ctx->head->field->type->arraysize[i], number); if (*ts != ',' && *ts != ')') return PyErr_Format(PyExc_ValueError, "Expected a comma in format string, got '%c'", *ts); if (*ts == ',') ts++; i++; } if (i != ndim) return PyErr_Format(PyExc_ValueError, "Expected %d dimension(s), got %d", ctx->head->field->type->ndim, i); if (!*ts) { PyErr_SetString(PyExc_ValueError, "Unexpected end of format string, expected ')'"); return NULL; } ctx->is_valid_array = 1; ctx->new_count = 1; *tsp = ++ts; return Py_None; } static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) { int got_Z = 0; while (1) { switch(*ts) { case 0: if (ctx->enc_type != 0 && ctx->head == NULL) { __Pyx_BufFmt_RaiseExpected(ctx); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; if (ctx->head != NULL) { __Pyx_BufFmt_RaiseExpected(ctx); return NULL; } return ts; case ' ': case '\r': case '\n': ++ts; break; case '<': if (!__Pyx_Is_Little_Endian()) { PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler"); return NULL; } ctx->new_packmode = '='; ++ts; break; case '>': case '!': if (__Pyx_Is_Little_Endian()) { PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler"); return NULL; } ctx->new_packmode = '='; ++ts; break; case '=': case '@': case '^': ctx->new_packmode = *ts++; break; case 'T': { const char* ts_after_sub; size_t i, struct_count = ctx->new_count; size_t struct_alignment = ctx->struct_alignment; ctx->new_count = 1; ++ts; if (*ts != '{') { PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'"); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_type = 0; ctx->enc_count = 0; ctx->struct_alignment = 0; ++ts; ts_after_sub = ts; for (i = 0; i != struct_count; ++i) { ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts); if (!ts_after_sub) return NULL; } ts = ts_after_sub; if (struct_alignment) ctx->struct_alignment = struct_alignment; } break; case '}': { size_t alignment = ctx->struct_alignment; ++ts; if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_type = 0; if (alignment && ctx->fmt_offset % alignment) { ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment); } } return ts; case 'x': if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->fmt_offset += ctx->new_count; ctx->new_count = 1; ctx->enc_count = 0; 
ctx->enc_type = 0; ctx->enc_packmode = ctx->new_packmode; ++ts; break; case 'Z': got_Z = 1; ++ts; if (*ts != 'f' && *ts != 'd' && *ts != 'g') { __Pyx_BufFmt_RaiseUnexpectedChar('Z'); return NULL; } CYTHON_FALLTHROUGH; case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I': case 'l': case 'L': case 'q': case 'Q': case 'f': case 'd': case 'g': case 'O': case 'p': if (ctx->enc_type == *ts && got_Z == ctx->is_complex && ctx->enc_packmode == ctx->new_packmode) { ctx->enc_count += ctx->new_count; ctx->new_count = 1; got_Z = 0; ++ts; break; } CYTHON_FALLTHROUGH; case 's': if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_count = ctx->new_count; ctx->enc_packmode = ctx->new_packmode; ctx->enc_type = *ts; ctx->is_complex = got_Z; ++ts; ctx->new_count = 1; got_Z = 0; break; case ':': ++ts; while(*ts != ':') ++ts; ++ts; break; case '(': if (!__pyx_buffmt_parse_array(ctx, &ts)) return NULL; break; default: { int number = __Pyx_BufFmt_ExpectNumber(&ts); if (number == -1) return NULL; ctx->new_count = (size_t)number; } } } } /* TypeInfoCompare */ static int __pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b) { int i; if (!a || !b) return 0; if (a == b) return 1; if (a->size != b->size || a->typegroup != b->typegroup || a->is_unsigned != b->is_unsigned || a->ndim != b->ndim) { if (a->typegroup == 'H' || b->typegroup == 'H') { return a->size == b->size; } else { return 0; } } if (a->ndim) { for (i = 0; i < a->ndim; i++) if (a->arraysize[i] != b->arraysize[i]) return 0; } if (a->typegroup == 'S') { if (a->flags != b->flags) return 0; if (a->fields || b->fields) { if (!(a->fields && b->fields)) return 0; for (i = 0; a->fields[i].type && b->fields[i].type; i++) { __Pyx_StructField *field_a = a->fields + i; __Pyx_StructField *field_b = b->fields + i; if (field_a->offset != field_b->offset || !__pyx_typeinfo_cmp(field_a->type, field_b->type)) return 0; } return !a->fields[i].type && !b->fields[i].type; } } return 1; } /* MemviewSliceValidateAndInit */ static int __pyx_check_strides(Py_buffer *buf, int dim, int ndim, int spec) { if (buf->shape[dim] <= 1) return 1; if (buf->strides) { if (spec & __Pyx_MEMVIEW_CONTIG) { if (spec & (__Pyx_MEMVIEW_PTR|__Pyx_MEMVIEW_FULL)) { if (buf->strides[dim] != sizeof(void *)) { PyErr_Format(PyExc_ValueError, "Buffer is not indirectly contiguous " "in dimension %d.", dim); goto fail; } } else if (buf->strides[dim] != buf->itemsize) { PyErr_SetString(PyExc_ValueError, "Buffer and memoryview are not contiguous " "in the same dimension."); goto fail; } } if (spec & __Pyx_MEMVIEW_FOLLOW) { Py_ssize_t stride = buf->strides[dim]; if (stride < 0) stride = -stride; if (stride < buf->itemsize) { PyErr_SetString(PyExc_ValueError, "Buffer and memoryview are not contiguous " "in the same dimension."); goto fail; } } } else { if (spec & __Pyx_MEMVIEW_CONTIG && dim != ndim - 1) { PyErr_Format(PyExc_ValueError, "C-contiguous buffer is not contiguous in " "dimension %d", dim); goto fail; } else if (spec & (__Pyx_MEMVIEW_PTR)) { PyErr_Format(PyExc_ValueError, "C-contiguous buffer is not indirect in " "dimension %d", dim); goto fail; } else if (buf->suboffsets) { PyErr_SetString(PyExc_ValueError, "Buffer exposes suboffsets but no strides"); goto fail; } } return 1; fail: return 0; } static int __pyx_check_suboffsets(Py_buffer *buf, int dim, CYTHON_UNUSED int ndim, int spec) { if (spec & __Pyx_MEMVIEW_DIRECT) { if (buf->suboffsets && buf->suboffsets[dim] >= 0) { PyErr_Format(PyExc_ValueError, "Buffer not compatible with direct access " "in dimension %d.", 
dim); goto fail; } } if (spec & __Pyx_MEMVIEW_PTR) { if (!buf->suboffsets || (buf->suboffsets && buf->suboffsets[dim] < 0)) { PyErr_Format(PyExc_ValueError, "Buffer is not indirectly accessible " "in dimension %d.", dim); goto fail; } } return 1; fail: return 0; } static int __pyx_verify_contig(Py_buffer *buf, int ndim, int c_or_f_flag) { int i; if (c_or_f_flag & __Pyx_IS_F_CONTIG) { Py_ssize_t stride = 1; for (i = 0; i < ndim; i++) { if (stride * buf->itemsize != buf->strides[i] && buf->shape[i] > 1) { PyErr_SetString(PyExc_ValueError, "Buffer not fortran contiguous."); goto fail; } stride = stride * buf->shape[i]; } } else if (c_or_f_flag & __Pyx_IS_C_CONTIG) { Py_ssize_t stride = 1; for (i = ndim - 1; i >- 1; i--) { if (stride * buf->itemsize != buf->strides[i] && buf->shape[i] > 1) { PyErr_SetString(PyExc_ValueError, "Buffer not C contiguous."); goto fail; } stride = stride * buf->shape[i]; } } return 1; fail: return 0; } static int __Pyx_ValidateAndInit_memviewslice( int *axes_specs, int c_or_f_flag, int buf_flags, int ndim, __Pyx_TypeInfo *dtype, __Pyx_BufFmt_StackElem stack[], __Pyx_memviewslice *memviewslice, PyObject *original_obj) { struct __pyx_memoryview_obj *memview, *new_memview; __Pyx_RefNannyDeclarations Py_buffer *buf; int i, spec = 0, retval = -1; __Pyx_BufFmt_Context ctx; int from_memoryview = __pyx_memoryview_check(original_obj); __Pyx_RefNannySetupContext("ValidateAndInit_memviewslice", 0); if (from_memoryview && __pyx_typeinfo_cmp(dtype, ((struct __pyx_memoryview_obj *) original_obj)->typeinfo)) { memview = (struct __pyx_memoryview_obj *) original_obj; new_memview = NULL; } else { memview = (struct __pyx_memoryview_obj *) __pyx_memoryview_new( original_obj, buf_flags, 0, dtype); new_memview = memview; if (unlikely(!memview)) goto fail; } buf = &memview->view; if (buf->ndim != ndim) { PyErr_Format(PyExc_ValueError, "Buffer has wrong number of dimensions (expected %d, got %d)", ndim, buf->ndim); goto fail; } if (new_memview) { __Pyx_BufFmt_Init(&ctx, stack, dtype); if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail; } if ((unsigned) buf->itemsize != dtype->size) { PyErr_Format(PyExc_ValueError, "Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "u byte%s) " "does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "u byte%s)", buf->itemsize, (buf->itemsize > 1) ? "s" : "", dtype->name, dtype->size, (dtype->size > 1) ? 
"s" : ""); goto fail; } for (i = 0; i < ndim; i++) { spec = axes_specs[i]; if (!__pyx_check_strides(buf, i, ndim, spec)) goto fail; if (!__pyx_check_suboffsets(buf, i, ndim, spec)) goto fail; } if (buf->strides && !__pyx_verify_contig(buf, ndim, c_or_f_flag)) goto fail; if (unlikely(__Pyx_init_memviewslice(memview, ndim, memviewslice, new_memview != NULL) == -1)) { goto fail; } retval = 0; goto no_fail; fail: Py_XDECREF(new_memview); retval = -1; no_fail: __Pyx_RefNannyFinishContext(); return retval; } /* ObjectToMemviewSlice */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_ds_nn_uint8_t(PyObject *obj, int writable_flag) { __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_BufFmt_StackElem stack[1]; int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED) }; int retcode; if (obj == Py_None) { result.memview = (struct __pyx_memoryview_obj *) Py_None; return result; } retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, 0, PyBUF_RECORDS_RO | writable_flag, 1, &__Pyx_TypeInfo_nn_uint8_t, stack, &result, obj); if (unlikely(retcode == -1)) goto __pyx_fail; return result; __pyx_fail: result.memview = NULL; result.data = NULL; return result; } /* CheckBinaryVersion */ static int __Pyx_check_binary_version(void) { char ctversion[4], rtversion[4]; PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION); PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion()); if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) { char message[200]; PyOS_snprintf(message, sizeof(message), "compiletime version %s of module '%.100s' " "does not match runtime version %s", ctversion, __Pyx_MODULE_NAME, rtversion); return PyErr_WarnEx(NULL, message, 1); } return 0; } /* FunctionExport */ static int __Pyx_ExportFunction(const char *name, void (*f)(void), const char *sig) { PyObject *d = 0; PyObject *cobj = 0; union { void (*fp)(void); void *p; } tmp; d = PyObject_GetAttrString(__pyx_m, (char *)"__pyx_capi__"); if (!d) { PyErr_Clear(); d = PyDict_New(); if (!d) goto bad; Py_INCREF(d); if (PyModule_AddObject(__pyx_m, (char *)"__pyx_capi__", d) < 0) goto bad; } tmp.fp = f; #if PY_VERSION_HEX >= 0x02070000 cobj = PyCapsule_New(tmp.p, sig, 0); #else cobj = PyCObject_FromVoidPtrAndDesc(tmp.p, (void *)sig, 0); #endif if (!cobj) goto bad; if (PyDict_SetItemString(d, name, cobj) < 0) goto bad; Py_DECREF(cobj); Py_DECREF(d); return 0; bad: Py_XDECREF(cobj); Py_XDECREF(d); return -1; } /* InitStrings */ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { while (t->p) { #if PY_MAJOR_VERSION < 3 if (t->is_unicode) { *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); } else if (t->intern) { *t->p = PyString_InternFromString(t->s); } else { *t->p = PyString_FromStringAndSize(t->s, t->n - 1); } #else if (t->is_unicode | t->is_str) { if (t->intern) { *t->p = PyUnicode_InternFromString(t->s); } else if (t->encoding) { *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); } else { *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); } } else { *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); } #endif if (!*t->p) return -1; if (PyObject_Hash(*t->p) == -1) return -1; ++t; } return 0; } static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) { return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str)); } static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) { Py_ssize_t ignore; return __Pyx_PyObject_AsStringAndSize(o, &ignore); } #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || 
__PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT #if !CYTHON_PEP393_ENABLED static const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { char* defenc_c; PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL); if (!defenc) return NULL; defenc_c = PyBytes_AS_STRING(defenc); #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII { char* end = defenc_c + PyBytes_GET_SIZE(defenc); char* c; for (c = defenc_c; c < end; c++) { if ((unsigned char) (*c) >= 128) { PyUnicode_AsASCIIString(o); return NULL; } } } #endif *length = PyBytes_GET_SIZE(defenc); return defenc_c; } #else static CYTHON_INLINE const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { if (unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL; #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII if (likely(PyUnicode_IS_ASCII(o))) { *length = PyUnicode_GET_LENGTH(o); return PyUnicode_AsUTF8(o); } else { PyUnicode_AsASCIIString(o); return NULL; } #else return PyUnicode_AsUTF8AndSize(o, length); #endif } #endif #endif static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) { #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT if ( #if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII __Pyx_sys_getdefaultencoding_not_ascii && #endif PyUnicode_Check(o)) { return __Pyx_PyUnicode_AsStringAndSize(o, length); } else #endif #if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE)) if (PyByteArray_Check(o)) { *length = PyByteArray_GET_SIZE(o); return PyByteArray_AS_STRING(o); } else #endif { char* result; int r = PyBytes_AsStringAndSize(o, &result, length); if (unlikely(r < 0)) { return NULL; } else { return result; } } } static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { int is_true = x == Py_True; if (is_true | (x == Py_False) | (x == Py_None)) return is_true; else return PyObject_IsTrue(x); } static PyObject* __Pyx_PyNumber_IntOrLongWrongResultType(PyObject* result, const char* type_name) { #if PY_MAJOR_VERSION >= 3 if (PyLong_Check(result)) { if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1, "__int__ returned non-int (type %.200s). 
" "The ability to return an instance of a strict subclass of int " "is deprecated, and may be removed in a future version of Python.", Py_TYPE(result)->tp_name)) { Py_DECREF(result); return NULL; } return result; } #endif PyErr_Format(PyExc_TypeError, "__%.4s__ returned non-%.4s (type %.200s)", type_name, type_name, Py_TYPE(result)->tp_name); Py_DECREF(result); return NULL; } static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) { #if CYTHON_USE_TYPE_SLOTS PyNumberMethods *m; #endif const char *name = NULL; PyObject *res = NULL; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x) || PyLong_Check(x))) #else if (likely(PyLong_Check(x))) #endif return __Pyx_NewRef(x); #if CYTHON_USE_TYPE_SLOTS m = Py_TYPE(x)->tp_as_number; #if PY_MAJOR_VERSION < 3 if (m && m->nb_int) { name = "int"; res = m->nb_int(x); } else if (m && m->nb_long) { name = "long"; res = m->nb_long(x); } #else if (likely(m && m->nb_int)) { name = "int"; res = m->nb_int(x); } #endif #else if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) { res = PyNumber_Int(x); } #endif if (likely(res)) { #if PY_MAJOR_VERSION < 3 if (unlikely(!PyInt_Check(res) && !PyLong_Check(res))) { #else if (unlikely(!PyLong_CheckExact(res))) { #endif return __Pyx_PyNumber_IntOrLongWrongResultType(res, name); } } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_TypeError, "an integer is required"); } return res; } static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { Py_ssize_t ival; PyObject *x; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_CheckExact(b))) { if (sizeof(Py_ssize_t) >= sizeof(long)) return PyInt_AS_LONG(b); else return PyInt_AsSsize_t(x); } #endif if (likely(PyLong_CheckExact(b))) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)b)->ob_digit; const Py_ssize_t size = Py_SIZE(b); if (likely(__Pyx_sst_abs(size) <= 1)) { ival = likely(size) ? digits[0] : 0; if (size == -1) ival = -ival; return ival; } else { switch (size) { case 2: if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -2: if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case 3: if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -3: if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case 4: if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -4: if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; } } #endif return PyLong_AsSsize_t(b); } x = PyNumber_Index(b); if (!x) return -1; ival = PyInt_AsSsize_t(x); Py_DECREF(x); return ival; } static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b) { return b ? __Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False); } static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { return PyInt_FromSize_t(ival); } #endif /* Py_PYTHON_H */
uni_dir_ctx.h
/*
 * Copyright (c) 2018 Intel Corporation. All rights reserved.
 * This software is available to you under the BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

static inline void uni_bw_ctx(int len, perf_metrics_t *metric_info,
                              int streaming_node)
{
    double start = 0.0, end = 0.0;
    unsigned long int i, j;
    int dest = partner_node(metric_info);
    static int check_once = 0;

    if (!check_once) {
        /* check to see whether sender and receiver are the same process */
        if (dest == metric_info->my_node) {
            fprintf(stderr, "Warning: Sender and receiver are the same "
                            "process (%d)\n", dest);
        }
        /* hostname validation for all sender and receiver processes */
        int status = check_hostname_validation(metric_info);
        if (status != 0) return;
        check_once++;
    }

    shmem_barrier_all();

    /* untimed warm-up pass */
    if (streaming_node) {
#pragma omp parallel default(none) firstprivate(len, dest) private(i, j) \
        shared(metric_info, start, end) num_threads(metric_info->nthreads)
        {
            const int thread_id = omp_get_thread_num();
            shmem_ctx_t ctx;
            int err = shmem_ctx_create(SHMEM_CTX_PRIVATE, &ctx);
            if (err) {
                printf("PE %d, Thr. %d: Error, context creation failed\n",
                       metric_info->my_node, thread_id);
                /* Exit with success to avoid test failures in automated
                 * testing */
                shmem_global_exit(0);
            }

            for (i = 0; i < metric_info->warmup; i++) {
                for (j = 0; j < metric_info->window_size; j++) {
#ifdef USE_NONBLOCKING_API
                    shmem_ctx_putmem_nbi(ctx, metric_info->dest + thread_id * len,
                                         metric_info->src + thread_id * len,
                                         len, dest);
#else
                    shmem_ctx_putmem(ctx, metric_info->dest + thread_id * len,
                                     metric_info->src + thread_id * len,
                                     len, dest);
#endif
                }
                shmem_ctx_quiet(ctx);
            }
            shmem_ctx_destroy(ctx);
        }
    }

    shmem_barrier_all();

    /* timed pass over metric_info->trials iterations */
    if (streaming_node) {
#pragma omp parallel default(none) firstprivate(len, dest) private(i, j) \
        shared(metric_info, start, end) num_threads(metric_info->nthreads)
        {
            const int thread_id = omp_get_thread_num();
            shmem_ctx_t ctx;
            int err = shmem_ctx_create(SHMEM_CTX_PRIVATE, &ctx);
            if (err) {
                printf("PE %d, Thr. %d: Error, context creation failed\n",
                       metric_info->my_node, thread_id);
                /* Exit with success to avoid test failures in automated
                 * testing */
                shmem_global_exit(0);
            }

#pragma omp barrier
#pragma omp master
            {
                start = perf_shmemx_wtime();
            }

            for (i = 0; i < metric_info->trials; i++) {
                for (j = 0; j < metric_info->window_size; j++) {
#ifdef USE_NONBLOCKING_API
                    shmem_ctx_putmem_nbi(ctx, metric_info->dest + thread_id * len,
                                         metric_info->src + thread_id * len,
                                         len, dest);
#else
                    shmem_ctx_putmem(ctx, metric_info->dest + thread_id * len,
                                     metric_info->src + thread_id * len,
                                     len, dest);
#endif
                }
                shmem_ctx_quiet(ctx);
            }
            shmem_ctx_destroy(ctx);
        }
    }

    shmem_barrier_all();

    if (streaming_node) {
        end = perf_shmemx_wtime();
        calc_and_print_results(end, start, len, metric_info);
    }

    shmem_barrier_all();
}
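One detail of uni_bw_ctx worth calling out: the clock is started by a single thread (omp master) only after an explicit omp barrier, so no thread is still creating its context when timing begins, and it is read again only after the parallel region has ended, so every thread's traffic falls inside the measured window. A self-contained sketch of just that timing pattern, with omp_get_wtime standing in for perf_shmemx_wtime and a dummy loop standing in for the put/quiet window:

#include <omp.h>
#include <stdio.h>

int main(void)
{
    double start = 0.0, end = 0.0;
    const long trials = 10000000L;

#pragma omp parallel shared(start)
    {
        volatile double sink = 0.0;

#pragma omp barrier              /* all threads are past setup... */
#pragma omp master
        start = omp_get_wtime(); /* ...then one thread starts the clock */

        for (long i = 0; i < trials; i++)
            sink += (double)i;   /* stand-in for the transfer loop */
    }
    end = omp_get_wtime();       /* read after the region, as above */
    printf("elapsed: %f s\n", end - start);
    return 0;
}

Note that master carries no implied barrier on exit, so other threads may begin work fractionally before start is recorded; the benchmark above accepts the same small slack.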
task_memory.c
// RUN: %libomp-compile-and-run | FileCheck %s
// REQUIRES: ompt
// UNSUPPORTED: gcc-4, gcc-5, gcc-6, gcc-7

#define USE_PRIVATE_TOOL 1
#include "callback.h"
#include <omp.h>

int main() {
  int x;
#pragma omp parallel num_threads(2)
  {
#pragma omp master
    {
#pragma omp task
      { x++; }
#pragma omp task firstprivate(x)
      { x++; }
    }
  }
  return 0;
}

static void on_ompt_callback_implicit_task(ompt_scope_endpoint_t endpoint,
                                           ompt_data_t *parallel_data,
                                           ompt_data_t *task_data,
                                           unsigned int team_size,
                                           unsigned int thread_num, int flag) {
  void *addr = NULL;
  size_t size = 0;
  int result = ompt_get_task_memory(&addr, &size, 0);
  switch (endpoint) {
  case ompt_scope_begin:
    task_data->value = ompt_get_unique_id();
    printf("ompt_event_implicit_task_begin: task_id=%" PRIu64
           ", memory_addr=%p, memory_size=%lu, result=%d \n",
           task_data->value, addr, size, result);
    break;
  case ompt_scope_end:
    printf("ompt_event_implicit_task_end: task_id=%" PRIu64
           ", memory_addr=%p, memory_size=%lu, result=%d \n",
           task_data->value, addr, size, result);
    break;
  }
}

static void on_ompt_callback_task_create(ompt_data_t *encountering_task_data,
                                         const ompt_frame_t *encountering_task_frame,
                                         ompt_data_t *new_task_data, int flags,
                                         int has_dependences,
                                         const void *codeptr_ra) {
  if (flags & ompt_task_initial)
    return; // not interested in the initial task
  new_task_data->value = ompt_get_unique_id();
  void *addr = NULL;
  size_t size = 0;
  printf("ompt_event_task_create: task_id=%" PRIu64 "\n",
         new_task_data->value);
}

static void on_ompt_callback_task_schedule(ompt_data_t *first_task_data,
                                           ompt_task_status_t prior_task_status,
                                           ompt_data_t *second_task_data) {
  void *addr = NULL;
  size_t size = 0;
  int result = ompt_get_task_memory(&addr, &size, 0);
  printf("ompt_event_task_schedule: task_id=%" PRIu64
         ", memory_addr=%p, memory_size=%lu, result=%d\n",
         first_task_data->value, addr, size, result);
}

int ompt_initialize(ompt_function_lookup_t lookup, int initial_device_num,
                    ompt_data_t *tool_data) {
  ompt_set_callback = (ompt_set_callback_t)lookup("ompt_set_callback");
  ompt_get_unique_id = (ompt_get_unique_id_t)lookup("ompt_get_unique_id");
  ompt_get_task_memory =
      (ompt_get_task_memory_t)lookup("ompt_get_task_memory");
  register_callback(ompt_callback_implicit_task);
  register_callback(ompt_callback_task_create);
  register_callback(ompt_callback_task_schedule);
  printf("0: NULL_POINTER=%p\n", (void *)NULL);
  return 1; // success
}

void ompt_finalize(ompt_data_t *tool_data) {}

ompt_start_tool_result_t *ompt_start_tool(unsigned int omp_version,
                                          const char *runtime_version) {
  static ompt_start_tool_result_t ompt_start_tool_result = {&ompt_initialize,
                                                            &ompt_finalize, 0};
  return &ompt_start_tool_result;
}

// CHECK: {{^}}0: NULL_POINTER=[[NULL:.*$]]

// CHECK: ompt_event_implicit_task_begin: task_id=[[TASK_ID:[0-9]+]]
// CHECK-SAME: memory_addr=[[NULL]], memory_size=0, result=0

// CHECK: ompt_event_task_create: task_id=[[TASK_ID_0:[0-9]+]]
// CHECK: ompt_event_task_create: task_id=[[TASK_ID_1:[0-9]+]]

// Expects non-zero address, size, and result
// CHECK-DAG: ompt_event_task_schedule: task_id=[[TASK_ID_0]],
// memory_addr=0x{{[0-f]+}}, memory_size={{[1-9][0-9]*}}, result=1
// CHECK-DAG: ompt_event_task_schedule: task_id=[[TASK_ID_1]],
// memory_addr=0x{{[0-f]+}}, memory_size={{[1-9][0-9]*}}, result=1

// CHECK: ompt_event_implicit_task_end: task_id=[[TASK_ID]]
// CHECK-SAME: memory_addr=[[NULL]], memory_size=0, result=0
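For context on the result values the CHECK lines assert: ompt_get_task_memory takes a block number and reports one block of task-private storage per call, which is why the scheduled explicit tasks yield a non-NULL address and non-zero size while the implicit tasks report nothing. My reading of the return value, an assumption rather than something this test pins down, is that it is nonzero exactly when the queried block is valid, so a tool could walk all blocks like this:

/* Hypothetical helper for use inside an OMPT callback; assumes the
 * ompt_get_task_memory pointer was looked up as in ompt_initialize above,
 * and that a nonzero return means the reported block is valid. */
static void dump_task_memory_blocks(void)
{
  void *addr = NULL;
  size_t size = 0;
  int block = 0;
  while (ompt_get_task_memory(&addr, &size, block)) {
    printf("  block %d: addr=%p, size=%zu\n", block, addr, size);
    block++;
  }
}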
lud_omp.c
#include <stdio.h>
#include <omp.h>

extern int omp_num_threads;

void lud_omp(float *a, int size)
{
    int i, j, k;
    float sum;

    printf("num of threads = %d\n", omp_num_threads);
    for (i = 0; i < size; i++) {
        omp_set_num_threads(omp_num_threads);

        /* update row i of U: u(i,j) = a(i,j) - sum_k l(i,k) * u(k,j) */
#pragma omp parallel for default(none) private(j, k, sum) shared(size, i, a)
        for (j = i; j < size; j++) {
            sum = a[i * size + j];
            for (k = 0; k < i; k++)
                sum -= a[i * size + k] * a[k * size + j];
            a[i * size + j] = sum;
        }

        /* update column i of L:
           l(j,i) = (a(j,i) - sum_k l(j,k) * u(k,i)) / u(i,i) */
#pragma omp parallel for default(none) private(j, k, sum) shared(size, i, a)
        for (j = i + 1; j < size; j++) {
            sum = a[j * size + i];
            for (k = 0; k < i; k++)
                sum -= a[j * size + k] * a[k * size + i];
            a[j * size + i] = sum / a[i * size + i];
        }
    }
}
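lud_omp factors a in place: when it returns, U occupies the diagonal and above, and the unit lower triangle of L, minus its implicit 1s, sits below. A minimal, hypothetical driver for sanity-checking that on a 2x2 system, assuming this translation unit is linked in (omp_num_threads is the extern the file expects its host program to define):

#include <stdio.h>

int omp_num_threads = 2;          /* satisfies the extern above */
void lud_omp(float *a, int size); /* provided by this file */

int main(void)
{
    /* A = [[4, 3], [6, 3]] factors as L = [[1, 0], [1.5, 1]],
       U = [[4, 3], [0, -1.5]] */
    float a[4] = { 4.0f, 3.0f,
                   6.0f, 3.0f };
    lud_omp(a, 2);
    printf("U = [%g %g; 0 %g]\n", a[0], a[1], a[3]); /* 4 3; 0 -1.5 */
    printf("L = [1 0; %g 1]\n", a[2]);               /* 1 0; 1.5 1 */
    return 0;
}

Multiplying L by U reproduces A, which is the quickest check that the in-place packing is being read correctly. Note the routine does no pivoting, so a zero on the diagonal of U (a[i * size + i] at step i) would divide by zero.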
GB_binop__bset_int16.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__bset_int16) // A.*B function (eWiseMult): GB (_AemultB_08__bset_int16) // A.*B function (eWiseMult): GB (_AemultB_02__bset_int16) // A.*B function (eWiseMult): GB (_AemultB_04__bset_int16) // A.*B function (eWiseMult): GB (_AemultB_bitmap__bset_int16) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((none)) // C+=B function (dense accum): GB (_Cdense_accumB__bset_int16) // C+=b function (dense accum): GB (_Cdense_accumb__bset_int16) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bset_int16) // C=scalar+B GB (_bind1st__bset_int16) // C=scalar+B' GB (_bind1st_tran__bset_int16) // C=A+scalar GB (_bind2nd__bset_int16) // C=A'+scalar GB (_bind2nd_tran__bset_int16) // C type: int16_t // A type: int16_t // A pattern? 0 // B type: int16_t // B pattern? 0 // BinaryOp: cij = GB_BITSET (aij, bij, int16_t, 16) #define GB_ATYPE \ int16_t #define GB_BTYPE \ int16_t #define GB_CTYPE \ int16_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int16_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int16_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int16_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GB_BITSET (x, y, int16_t, 16) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 1 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_BSET || GxB_NO_INT16 || GxB_NO_BSET_INT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__bset_int16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__bset_int16) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__bset_int16) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int16_t int16_t bwork = (*((int16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *restrict Cx = (int16_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *restrict Cx = (int16_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__bset_int16) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; int16_t alpha_scalar ; int16_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((int16_t *) alpha_scalar_in)) ; 
beta_scalar = (*((int16_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__bset_int16) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__bset_int16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__bset_int16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__bset_int16) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__bset_int16) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *Cx = (int16_t *) Cx_output ; int16_t x = (*((int16_t *) x_input)) ; int16_t *Bx = (int16_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int16_t bij = GBX (Bx, p, false) ; Cx [p] = GB_BITSET (x, bij, int16_t, 16) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__bset_int16) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int16_t *Cx = (int16_t *) Cx_output ; int16_t *Ax = (int16_t *) Ax_input ; int16_t y = (*((int16_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int16_t aij = GBX (Ax, p, false) ; Cx [p] = GB_BITSET (aij, y, int16_t, 16) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_BITSET (x, aij, int16_t, 16) ; \ } GrB_Info GB (_bind1st_tran__bset_int16) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict 
*Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t x = (*((const int16_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_BITSET (aij, y, int16_t, 16) ; \ } GrB_Info GB (_bind2nd_tran__bset_int16) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t y = (*((const int16_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
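Every kernel in the file above bottoms out in the macro call GB_BITSET (aij, bij, int16_t, 16). Below is a self-contained, hedged stand-in consistent with that comment, shown as a plain C function: set bit bij of aij. The authoritative macro lives in SuiteSparse's GB.h, and its exact handling of out-of-range bit indices is an assumption here.

#include <stdint.h>
#include <stdio.h>

// Hedged stand-in for GB_BITSET(x, k, int16_t, 16): set bit k of x.
// Treating an out-of-range k as a no-op is an assumption; check GB.h
// for the authoritative definition.
static inline int16_t bset_int16(int16_t x, int16_t k) {
  if (k < 0 || k >= 16)
    return x;                               // out of range: leave x unchanged
  return (int16_t)(x | ((uint16_t)1 << k)); // set bit k
}

int main(void) {
  printf("%d\n", bset_int16(0, 3));  // 8  (bit 3 set)
  printf("%d\n", bset_int16(8, 3));  // 8  (bit already set)
  printf("%d\n", bset_int16(1, 20)); // 1  (index out of range, unchanged)
  return 0;
}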
Sema.h
//===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file defines the Sema class, which performs semantic analysis and // builds ASTs. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_SEMA_SEMA_H #define LLVM_CLANG_SEMA_SEMA_H #include "clang/AST/ASTConcept.h" #include "clang/AST/ASTFwd.h" #include "clang/AST/Attr.h" #include "clang/AST/Availability.h" #include "clang/AST/ComparisonCategories.h" #include "clang/AST/DeclTemplate.h" #include "clang/AST/DeclarationName.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/ExprConcepts.h" #include "clang/AST/ExprObjC.h" #include "clang/AST/ExprOpenMP.h" #include "clang/AST/ExternalASTSource.h" #include "clang/AST/LocInfoType.h" #include "clang/AST/MangleNumberingContext.h" #include "clang/AST/NSAPI.h" #include "clang/AST/PrettyPrinter.h" #include "clang/AST/StmtCXX.h" #include "clang/AST/StmtOpenMP.h" #include "clang/AST/TypeLoc.h" #include "clang/APINotes/APINotesManager.h" #include "clang/AST/TypeOrdering.h" #include "clang/Basic/BitmaskEnum.h" #include "clang/Basic/Builtins.h" #include "clang/Basic/DarwinSDKInfo.h" #include "clang/Basic/ExpressionTraits.h" #include "clang/Basic/Module.h" #include "clang/Basic/OpenCLOptions.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/PragmaKinds.h" #include "clang/Basic/Specifiers.h" #include "clang/Basic/TemplateKinds.h" #include "clang/Basic/TypeTraits.h" #include "clang/Sema/AnalysisBasedWarnings.h" #include "clang/Sema/CleanupInfo.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/ExternalSemaSource.h" #include "clang/Sema/IdentifierResolver.h" #include "clang/Sema/ObjCMethodList.h" #include "clang/Sema/Ownership.h" #include "clang/Sema/Scope.h" #include "clang/Sema/SemaConcept.h" #include "clang/Sema/TypoCorrection.h" #include "clang/Sema/Weak.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/Optional.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallBitVector.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallSet.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/TinyPtrVector.h" #include "llvm/Frontend/OpenMP/OMPConstants.h" #include <deque> #include <functional> #include <memory> #include <string> #include <tuple> #include <vector> namespace llvm { class APSInt; template <typename ValueT> struct DenseMapInfo; template <typename ValueT, typename ValueInfoT> class DenseSet; class SmallBitVector; struct InlineAsmIdentifierInfo; } namespace clang { class ADLResult; class ASTConsumer; class ASTContext; class ASTMutationListener; class ASTReader; class ASTWriter; class ArrayType; class ParsedAttr; class BindingDecl; class BlockDecl; class CapturedDecl; class CXXBasePath; class CXXBasePaths; class CXXBindTemporaryExpr; typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath; class CXXConstructorDecl; class CXXConversionDecl; class CXXDeleteExpr; class CXXDestructorDecl; class CXXFieldCollector; class CXXMemberCallExpr; class CXXMethodDecl; class CXXScopeSpec; class CXXTemporary; class CXXTryStmt; class CallExpr; class ClassTemplateDecl; class ClassTemplatePartialSpecializationDecl; class ClassTemplateSpecializationDecl; class 
VarTemplatePartialSpecializationDecl; class CodeCompleteConsumer; class CodeCompletionAllocator; class CodeCompletionTUInfo; class CodeCompletionResult; class CoroutineBodyStmt; class Decl; class DeclAccessPair; class DeclContext; class DeclRefExpr; class DeclaratorDecl; class DeducedTemplateArgument; class DependentDiagnostic; class DesignatedInitExpr; class Designation; class EnableIfAttr; class EnumConstantDecl; class Expr; class ExtVectorType; class FormatAttr; class FriendDecl; class FunctionDecl; class FunctionProtoType; class FunctionTemplateDecl; class ImplicitConversionSequence; typedef MutableArrayRef<ImplicitConversionSequence> ConversionSequenceList; class InitListExpr; class InitializationKind; class InitializationSequence; class InitializedEntity; class IntegerLiteral; class LabelStmt; class LambdaExpr; class LangOptions; class LocalInstantiationScope; class LookupResult; class MacroInfo; typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath; class ModuleLoader; class MultiLevelTemplateArgumentList; class NamedDecl; class ObjCCategoryDecl; class ObjCCategoryImplDecl; class ObjCCompatibleAliasDecl; class ObjCContainerDecl; class ObjCImplDecl; class ObjCImplementationDecl; class ObjCInterfaceDecl; class ObjCIvarDecl; template <class T> class ObjCList; class ObjCMessageExpr; class ObjCMethodDecl; class ObjCPropertyDecl; class ObjCProtocolDecl; class OMPThreadPrivateDecl; class OMPRequiresDecl; class OMPDeclareReductionDecl; class OMPDeclareSimdDecl; class OMPClause; struct OMPVarListLocTy; struct OverloadCandidate; enum class OverloadCandidateParamOrder : char; enum OverloadCandidateRewriteKind : unsigned; class OverloadCandidateSet; class OverloadExpr; class ParenListExpr; class ParmVarDecl; class Preprocessor; class PseudoDestructorTypeStorage; class PseudoObjectExpr; class QualType; class StandardConversionSequence; class Stmt; class StringLiteral; class SwitchStmt; class TemplateArgument; class TemplateArgumentList; class TemplateArgumentLoc; class TemplateDecl; class TemplateInstantiationCallback; class TemplateParameterList; class TemplatePartialOrderingContext; class TemplateTemplateParmDecl; class Token; class TypeAliasDecl; class TypedefDecl; class TypedefNameDecl; class TypeLoc; class TypoCorrectionConsumer; class UnqualifiedId; class UnresolvedLookupExpr; class UnresolvedMemberExpr; class UnresolvedSetImpl; class UnresolvedSetIterator; class UsingDecl; class UsingShadowDecl; class ValueDecl; class VarDecl; class VarTemplateSpecializationDecl; class VisibilityAttr; class VisibleDeclConsumer; class IndirectFieldDecl; struct DeductionFailureInfo; class TemplateSpecCandidateSet; namespace sema { class AccessedEntity; class BlockScopeInfo; class Capture; class CapturedRegionScopeInfo; class CapturingScopeInfo; class CompoundScopeInfo; class DelayedDiagnostic; class DelayedDiagnosticPool; class FunctionScopeInfo; class LambdaScopeInfo; class PossiblyUnreachableDiag; class SemaPPCallbacks; class TemplateDeductionInfo; } namespace threadSafety { class BeforeSet; void threadSafetyCleanup(BeforeSet* Cache); } // FIXME: No way to easily map from TemplateTypeParmTypes to // TemplateTypeParmDecls, so we have this horrible PointerUnion. typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>, SourceLocation> UnexpandedParameterPack; /// Describes whether we've seen any nullability information for the given /// file. 
struct FileNullability { /// The first pointer declarator (of any pointer kind) in the file that does /// not have a corresponding nullability annotation. SourceLocation PointerLoc; /// The end location for the first pointer declarator in the file. Used for /// placing fix-its. SourceLocation PointerEndLoc; /// Which kind of pointer declarator we saw. uint8_t PointerKind; /// Whether we saw any type nullability annotations in the given file. bool SawTypeNullability = false; }; /// A mapping from file IDs to a record of whether we've seen nullability /// information in that file. class FileNullabilityMap { /// A mapping from file IDs to the nullability information for each file ID. llvm::DenseMap<FileID, FileNullability> Map; /// A single-element cache based on the file ID. struct { FileID File; FileNullability Nullability; } Cache; public: FileNullability &operator[](FileID file) { // Check the single-element cache. if (file == Cache.File) return Cache.Nullability; // It's not in the single-element cache; flush the cache if we have one. if (!Cache.File.isInvalid()) { Map[Cache.File] = Cache.Nullability; } // Pull this entry into the cache. Cache.File = file; Cache.Nullability = Map[file]; return Cache.Nullability; } }; /// Tracks expected type during expression parsing, for use in code completion. /// The type is tied to a particular token; all functions that update or consume /// the type take a start location of the token they are looking at as a /// parameter. This avoids updating the type on hot paths in the parser. class PreferredTypeBuilder { public: PreferredTypeBuilder(bool Enabled) : Enabled(Enabled) {} void enterCondition(Sema &S, SourceLocation Tok); void enterReturn(Sema &S, SourceLocation Tok); void enterVariableInit(SourceLocation Tok, Decl *D); /// Handles e.g. BaseType{ .D = Tok... void enterDesignatedInitializer(SourceLocation Tok, QualType BaseType, const Designation &D); /// Computing a type for the function argument may require running /// overloading, so we postpone its computation until it is actually needed. /// /// Clients should be very careful when using this function, as it stores a /// function_ref; clients should make sure all calls to get() with the same /// location happen while function_ref is alive. /// /// The callback should also emit signature help as a side-effect, but only /// if the completion point has been reached. void enterFunctionArgument(SourceLocation Tok, llvm::function_ref<QualType()> ComputeType); void enterParenExpr(SourceLocation Tok, SourceLocation LParLoc); void enterUnary(Sema &S, SourceLocation Tok, tok::TokenKind OpKind, SourceLocation OpLoc); void enterBinary(Sema &S, SourceLocation Tok, Expr *LHS, tok::TokenKind Op); void enterMemAccess(Sema &S, SourceLocation Tok, Expr *Base); void enterSubscript(Sema &S, SourceLocation Tok, Expr *LHS); /// Handles all type casts, including C-style cast, C++ casts, etc. void enterTypeCast(SourceLocation Tok, QualType CastType); /// Get the expected type associated with this location, if any. /// /// If the location is a function argument, determining the expected type /// involves considering all function overloads and the arguments so far. /// In this case, signature help for these function overloads will be reported /// as a side-effect (only if the completion point has been reached). 
QualType get(SourceLocation Tok) const { if (!Enabled || Tok != ExpectedLoc) return QualType(); if (!Type.isNull()) return Type; if (ComputeType) return ComputeType(); return QualType(); } private: bool Enabled; /// Start position of a token for which we store expected type. SourceLocation ExpectedLoc; /// Expected type for a token starting at ExpectedLoc. QualType Type; /// A function to compute expected type at ExpectedLoc. It is only considered /// if Type is null. llvm::function_ref<QualType()> ComputeType; }; /// Sema - This implements semantic analysis and AST building for C. class Sema final { Sema(const Sema &) = delete; void operator=(const Sema &) = delete; ///Source of additional semantic information. ExternalSemaSource *ExternalSource; ///Whether Sema has generated a multiplexer and has to delete it. bool isMultiplexExternalSource; static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD); bool isVisibleSlow(const NamedDecl *D); /// Determine whether two declarations should be linked together, given that /// the old declaration might not be visible and the new declaration might /// not have external linkage. bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old, const NamedDecl *New) { if (isVisible(Old)) return true; // See comment in below overload for why it's safe to compute the linkage // of the new declaration here. if (New->isExternallyDeclarable()) { assert(Old->isExternallyDeclarable() && "should not have found a non-externally-declarable previous decl"); return true; } return false; } bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New); void setupImplicitSpecialMemberType(CXXMethodDecl *SpecialMem, QualType ResultTy, ArrayRef<QualType> Args); public: /// The maximum alignment, same as in llvm::Value. We duplicate them here /// because that allows us not to duplicate the constants in clang code, /// which we must to since we can't directly use the llvm constants. /// The value is verified against llvm here: lib/CodeGen/CGDecl.cpp /// /// This is the greatest alignment value supported by load, store, and alloca /// instructions, and global values. static const unsigned MaxAlignmentExponent = 32; static const uint64_t MaximumAlignment = 1ull << MaxAlignmentExponent; typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy; typedef OpaquePtr<TemplateName> TemplateTy; typedef OpaquePtr<QualType> TypeTy; OpenCLOptions OpenCLFeatures; FPOptions CurFPFeatures; const LangOptions &LangOpts; Preprocessor &PP; ASTContext &Context; ASTConsumer &Consumer; DiagnosticsEngine &Diags; SourceManager &SourceMgr; api_notes::APINotesManager APINotes; /// Flag indicating whether or not to collect detailed statistics. bool CollectStats; /// Code-completion consumer. CodeCompleteConsumer *CodeCompleter; /// CurContext - This is the current declaration context of parsing. DeclContext *CurContext; /// Generally null except when we temporarily switch decl contexts, /// like in \see ActOnObjCTemporaryExitContainerContext. DeclContext *OriginalLexicalContext; /// VAListTagName - The declaration name corresponding to __va_list_tag. /// This is used as part of a hack to omit that class from ADL results. DeclarationName VAListTagName; bool MSStructPragmaOn; // True when \#pragma ms_struct on /// Controls member pointer representation format under the MS ABI. LangOptions::PragmaMSPointersToMembersKind MSPointerToMemberRepresentationMethod; /// Stack of active SEH __finally scopes. Can be empty. 
SmallVector<Scope*, 2> CurrentSEHFinally; /// Source location for newly created implicit MSInheritanceAttrs SourceLocation ImplicitMSInheritanceAttrLoc; /// Holds TypoExprs that are created from `createDelayedTypo`. This is used by /// `TransformTypos` in order to keep track of any TypoExprs that are created /// recursively during typo correction and wipe them away if the correction /// fails. llvm::SmallVector<TypoExpr *, 2> TypoExprs; /// pragma clang section kind enum PragmaClangSectionKind { PCSK_Invalid = 0, PCSK_BSS = 1, PCSK_Data = 2, PCSK_Rodata = 3, PCSK_Text = 4, PCSK_Relro = 5 }; enum PragmaClangSectionAction { PCSA_Set = 0, PCSA_Clear = 1 }; struct PragmaClangSection { std::string SectionName; bool Valid = false; SourceLocation PragmaLocation; }; PragmaClangSection PragmaClangBSSSection; PragmaClangSection PragmaClangDataSection; PragmaClangSection PragmaClangRodataSection; PragmaClangSection PragmaClangRelroSection; PragmaClangSection PragmaClangTextSection; enum PragmaMsStackAction { PSK_Reset = 0x0, // #pragma () PSK_Set = 0x1, // #pragma (value) PSK_Push = 0x2, // #pragma (push[, id]) PSK_Pop = 0x4, // #pragma (pop[, id]) PSK_Show = 0x8, // #pragma (show) -- only for "pack"! PSK_Push_Set = PSK_Push | PSK_Set, // #pragma (push[, id], value) PSK_Pop_Set = PSK_Pop | PSK_Set, // #pragma (pop[, id], value) }; // #pragma pack and align. class AlignPackInfo { public: // `Native` represents default align mode, which may vary based on the // platform. enum Mode : unsigned char { Native, Natural, Packed, Mac68k }; // #pragma pack info constructor AlignPackInfo(AlignPackInfo::Mode M, unsigned Num, bool IsXL) : PackAttr(true), AlignMode(M), PackNumber(Num), XLStack(IsXL) { assert(Num == PackNumber && "The pack number has been truncated."); } // #pragma align info constructor AlignPackInfo(AlignPackInfo::Mode M, bool IsXL) : PackAttr(false), AlignMode(M), PackNumber(M == Packed ? 1 : UninitPackVal), XLStack(IsXL) {} explicit AlignPackInfo(bool IsXL) : AlignPackInfo(Native, IsXL) {} AlignPackInfo() : AlignPackInfo(Native, false) {} // When an AlignPackInfo itself cannot be used, this returns a 32-bit // integer encoding for it. This should only be passed to // AlignPackInfo::getFromRawEncoding; it should not be inspected directly. static uint32_t getRawEncoding(const AlignPackInfo &Info) { std::uint32_t Encoding{}; if (Info.IsXLStack()) Encoding |= IsXLMask; Encoding |= static_cast<uint32_t>(Info.getAlignMode()) << 1; if (Info.IsPackAttr()) Encoding |= PackAttrMask; Encoding |= static_cast<uint32_t>(Info.getPackNumber()) << 4; return Encoding; } static AlignPackInfo getFromRawEncoding(unsigned Encoding) { bool IsXL = static_cast<bool>(Encoding & IsXLMask); AlignPackInfo::Mode M = static_cast<AlignPackInfo::Mode>((Encoding & AlignModeMask) >> 1); int PackNumber = (Encoding & PackNumMask) >> 4; if (Encoding & PackAttrMask) return AlignPackInfo(M, PackNumber, IsXL); return AlignPackInfo(M, IsXL); } bool IsPackAttr() const { return PackAttr; } bool IsAlignAttr() const { return !PackAttr; } Mode getAlignMode() const { return AlignMode; } unsigned getPackNumber() const { return PackNumber; } bool IsPackSet() const { // #pragma align, #pragma pack(), and #pragma pack(0) do not set the pack // attribute on a decl. 
return PackNumber != UninitPackVal && PackNumber != 0; } bool IsXLStack() const { return XLStack; } bool operator==(const AlignPackInfo &Info) const { return std::tie(AlignMode, PackNumber, PackAttr, XLStack) == std::tie(Info.AlignMode, Info.PackNumber, Info.PackAttr, Info.XLStack); } bool operator!=(const AlignPackInfo &Info) const { return !(*this == Info); } private: /// \brief True if this is a pragma pack attribute, /// not a pragma align attribute. bool PackAttr; /// \brief The alignment mode that is in effect. Mode AlignMode; /// \brief The pack number of the stack. unsigned char PackNumber; /// \brief True if it is a XL #pragma align/pack stack. bool XLStack; /// \brief Uninitialized pack value. static constexpr unsigned char UninitPackVal = -1; // Masks to encode and decode an AlignPackInfo. static constexpr uint32_t IsXLMask{0x0000'0001}; static constexpr uint32_t AlignModeMask{0x0000'0006}; static constexpr uint32_t PackAttrMask{0x00000'0008}; static constexpr uint32_t PackNumMask{0x0000'01F0}; }; template<typename ValueType> struct PragmaStack { struct Slot { llvm::StringRef StackSlotLabel; ValueType Value; SourceLocation PragmaLocation; SourceLocation PragmaPushLocation; Slot(llvm::StringRef StackSlotLabel, ValueType Value, SourceLocation PragmaLocation, SourceLocation PragmaPushLocation) : StackSlotLabel(StackSlotLabel), Value(Value), PragmaLocation(PragmaLocation), PragmaPushLocation(PragmaPushLocation) {} }; void Act(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, ValueType Value) { if (Action == PSK_Reset) { CurrentValue = DefaultValue; CurrentPragmaLocation = PragmaLocation; return; } if (Action & PSK_Push) Stack.emplace_back(StackSlotLabel, CurrentValue, CurrentPragmaLocation, PragmaLocation); else if (Action & PSK_Pop) { if (!StackSlotLabel.empty()) { // If we've got a label, try to find it and jump there. auto I = llvm::find_if(llvm::reverse(Stack), [&](const Slot &x) { return x.StackSlotLabel == StackSlotLabel; }); // If we found the label so pop from there. if (I != Stack.rend()) { CurrentValue = I->Value; CurrentPragmaLocation = I->PragmaLocation; Stack.erase(std::prev(I.base()), Stack.end()); } } else if (!Stack.empty()) { // We do not have a label, just pop the last entry. CurrentValue = Stack.back().Value; CurrentPragmaLocation = Stack.back().PragmaLocation; Stack.pop_back(); } } if (Action & PSK_Set) { CurrentValue = Value; CurrentPragmaLocation = PragmaLocation; } } // MSVC seems to add artificial slots to #pragma stacks on entering a C++ // method body to restore the stacks on exit, so it works like this: // // struct S { // #pragma <name>(push, InternalPragmaSlot, <current_pragma_value>) // void Method {} // #pragma <name>(pop, InternalPragmaSlot) // }; // // It works even with #pragma vtordisp, although MSVC doesn't support // #pragma vtordisp(push [, id], n) // syntax. // // Push / pop a named sentinel slot. void SentinelAction(PragmaMsStackAction Action, StringRef Label) { assert((Action == PSK_Push || Action == PSK_Pop) && "Can only push / pop #pragma stack sentinels!"); Act(CurrentPragmaLocation, Action, Label, CurrentValue); } // Constructors. explicit PragmaStack(const ValueType &Default) : DefaultValue(Default), CurrentValue(Default) {} bool hasValue() const { return CurrentValue != DefaultValue; } SmallVector<Slot, 2> Stack; ValueType DefaultValue; // Value used for PSK_Reset action. 
ValueType CurrentValue; SourceLocation CurrentPragmaLocation; }; // FIXME: We should serialize / deserialize these if they occur in a PCH (but // we shouldn't do so if they're in a module). /// Whether to insert vtordisps prior to virtual bases in the Microsoft /// C++ ABI. Possible values are 0, 1, and 2, which mean: /// /// 0: Suppress all vtordisps /// 1: Insert vtordisps in the presence of vbase overrides and non-trivial /// structors /// 2: Always insert vtordisps to support RTTI on partially constructed /// objects PragmaStack<MSVtorDispMode> VtorDispStack; PragmaStack<AlignPackInfo> AlignPackStack; // The current #pragma align/pack values and locations at each #include. struct AlignPackIncludeState { AlignPackInfo CurrentValue; SourceLocation CurrentPragmaLocation; bool HasNonDefaultValue, ShouldWarnOnInclude; }; SmallVector<AlignPackIncludeState, 8> AlignPackIncludeStack; // Segment #pragmas. PragmaStack<StringLiteral *> DataSegStack; PragmaStack<StringLiteral *> BSSSegStack; PragmaStack<StringLiteral *> ConstSegStack; PragmaStack<StringLiteral *> CodeSegStack; // This stack tracks the current state of Sema.CurFPFeatures. PragmaStack<FPOptionsOverride> FpPragmaStack; FPOptionsOverride CurFPFeatureOverrides() { FPOptionsOverride result; if (!FpPragmaStack.hasValue()) { result = FPOptionsOverride(); } else { result = FpPragmaStack.CurrentValue; } return result; } // RAII object to push / pop sentinel slots for all MS #pragma stacks. // Actions should be performed only if we enter / exit a C++ method body. class PragmaStackSentinelRAII { public: PragmaStackSentinelRAII(Sema &S, StringRef SlotLabel, bool ShouldAct); ~PragmaStackSentinelRAII(); private: Sema &S; StringRef SlotLabel; bool ShouldAct; }; /// A mapping that describes the nullability we've seen in each header file. FileNullabilityMap NullabilityMap; /// Last section used with #pragma init_seg. StringLiteral *CurInitSeg; SourceLocation CurInitSegLoc; /// VisContext - Manages the stack for \#pragma GCC visibility. void *VisContext; // Really a "PragmaVisStack*" /// This an attribute introduced by \#pragma clang attribute. struct PragmaAttributeEntry { SourceLocation Loc; ParsedAttr *Attribute; SmallVector<attr::SubjectMatchRule, 4> MatchRules; bool IsUsed; }; /// A push'd group of PragmaAttributeEntries. struct PragmaAttributeGroup { /// The location of the push attribute. SourceLocation Loc; /// The namespace of this push group. const IdentifierInfo *Namespace; SmallVector<PragmaAttributeEntry, 2> Entries; }; SmallVector<PragmaAttributeGroup, 2> PragmaAttributeStack; /// The declaration that is currently receiving an attribute from the /// #pragma attribute stack. const Decl *PragmaAttributeCurrentTargetDecl; /// This represents the last location of a "#pragma clang optimize off" /// directive if such a directive has not been closed by an "on" yet. If /// optimizations are currently "on", this is set to an invalid location. SourceLocation OptimizeOffPragmaLocation; /// Flag indicating if Sema is building a recovery call expression. /// /// This flag is used to avoid building recovery call expressions /// if Sema is already doing so, which would cause infinite recursions. bool IsBuildingRecoveryCallExpr; /// Used to control the generation of ExprWithCleanups. CleanupInfo Cleanup; /// ExprCleanupObjects - This is the stack of objects requiring /// cleanup that are created by the current full expression. 
SmallVector<ExprWithCleanups::CleanupObject, 8> ExprCleanupObjects; /// Store a set of either DeclRefExprs or MemberExprs that contain a reference /// to a variable (constant) that may or may not be odr-used in this Expr, and /// we won't know until all lvalue-to-rvalue and discarded value conversions /// have been applied to all subexpressions of the enclosing full expression. /// This is cleared at the end of each full expression. using MaybeODRUseExprSet = llvm::SetVector<Expr *, SmallVector<Expr *, 4>, llvm::SmallPtrSet<Expr *, 4>>; MaybeODRUseExprSet MaybeODRUseExprs; std::unique_ptr<sema::FunctionScopeInfo> CachedFunctionScope; /// Stack containing information about each of the nested /// function, block, and method scopes that are currently active. SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes; /// The index of the first FunctionScope that corresponds to the current /// context. unsigned FunctionScopesStart = 0; ArrayRef<sema::FunctionScopeInfo*> getFunctionScopes() const { return llvm::makeArrayRef(FunctionScopes.begin() + FunctionScopesStart, FunctionScopes.end()); } /// Stack containing information needed when in C++2a an 'auto' is encountered /// in a function declaration parameter type specifier in order to invent a /// corresponding template parameter in the enclosing abbreviated function /// template. This information is also present in LambdaScopeInfo, stored in /// the FunctionScopes stack. SmallVector<InventedTemplateParameterInfo, 4> InventedParameterInfos; /// The index of the first InventedParameterInfo that refers to the current /// context. unsigned InventedParameterInfosStart = 0; ArrayRef<InventedTemplateParameterInfo> getInventedParameterInfos() const { return llvm::makeArrayRef(InventedParameterInfos.begin() + InventedParameterInfosStart, InventedParameterInfos.end()); } typedef LazyVector<TypedefNameDecl *, ExternalSemaSource, &ExternalSemaSource::ReadExtVectorDecls, 2, 2> ExtVectorDeclsType; /// ExtVectorDecls - This is a list all the extended vector types. This allows /// us to associate a raw vector type with one of the ext_vector type names. /// This is only necessary for issuing pretty diagnostics. ExtVectorDeclsType ExtVectorDecls; /// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes. std::unique_ptr<CXXFieldCollector> FieldCollector; typedef llvm::SmallSetVector<NamedDecl *, 16> NamedDeclSetType; /// Set containing all declared private fields that are not used. NamedDeclSetType UnusedPrivateFields; /// Set containing all typedefs that are likely unused. llvm::SmallSetVector<const TypedefNameDecl *, 4> UnusedLocalTypedefNameCandidates; /// Delete-expressions to be analyzed at the end of translation unit /// /// This list contains class members, and locations of delete-expressions /// that could not be proven as to whether they mismatch with new-expression /// used in initializer of the field. typedef std::pair<SourceLocation, bool> DeleteExprLoc; typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs; llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs; typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy; /// PureVirtualClassDiagSet - a set of class declarations which we have /// emitted a list of pure virtual functions. Used to prevent emitting the /// same list more than once. std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet; /// ParsingInitForAutoVars - a set of declarations with auto types for which /// we are currently parsing the initializer. 
llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars; /// Look for a locally scoped extern "C" declaration by the given name. NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name); typedef LazyVector<VarDecl *, ExternalSemaSource, &ExternalSemaSource::ReadTentativeDefinitions, 2, 2> TentativeDefinitionsType; /// All the tentative definitions encountered in the TU. TentativeDefinitionsType TentativeDefinitions; /// All the external declarations encoutered and used in the TU. SmallVector<VarDecl *, 4> ExternalDeclarations; typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2> UnusedFileScopedDeclsType; /// The set of file scoped decls seen so far that have not been used /// and must warn if not used. Only contains the first declaration. UnusedFileScopedDeclsType UnusedFileScopedDecls; typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadDelegatingConstructors, 2, 2> DelegatingCtorDeclsType; /// All the delegating constructors seen so far in the file, used for /// cycle detection at the end of the TU. DelegatingCtorDeclsType DelegatingCtorDecls; /// All the overriding functions seen during a class definition /// that had their exception spec checks delayed, plus the overridden /// function. SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2> DelayedOverridingExceptionSpecChecks; /// All the function redeclarations seen during a class definition that had /// their exception spec checks delayed, plus the prior declaration they /// should be checked against. Except during error recovery, the new decl /// should always be a friend declaration, as that's the only valid way to /// redeclare a special member before its class is complete. SmallVector<std::pair<FunctionDecl*, FunctionDecl*>, 2> DelayedEquivalentExceptionSpecChecks; typedef llvm::MapVector<const FunctionDecl *, std::unique_ptr<LateParsedTemplate>> LateParsedTemplateMapT; LateParsedTemplateMapT LateParsedTemplateMap; /// Callback to the parser to parse templated functions when needed. typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT); typedef void LateTemplateParserCleanupCB(void *P); LateTemplateParserCB *LateTemplateParser; LateTemplateParserCleanupCB *LateTemplateParserCleanup; void *OpaqueParser; void SetLateTemplateParser(LateTemplateParserCB *LTP, LateTemplateParserCleanupCB *LTPCleanup, void *P) { LateTemplateParser = LTP; LateTemplateParserCleanup = LTPCleanup; OpaqueParser = P; } /// \brief Callback to the parser to parse a type expressed as a string. std::function<TypeResult(StringRef, StringRef, SourceLocation)> ParseTypeFromStringCallback; class DelayedDiagnostics; class DelayedDiagnosticsState { sema::DelayedDiagnosticPool *SavedPool; friend class Sema::DelayedDiagnostics; }; typedef DelayedDiagnosticsState ParsingDeclState; typedef DelayedDiagnosticsState ProcessingContextState; /// A class which encapsulates the logic for delaying diagnostics /// during parsing and other processing. class DelayedDiagnostics { /// The current pool of diagnostics into which delayed /// diagnostics should go. sema::DelayedDiagnosticPool *CurPool; public: DelayedDiagnostics() : CurPool(nullptr) {} /// Adds a delayed diagnostic. void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h /// Determines whether diagnostics should be delayed. bool shouldDelayDiagnostics() { return CurPool != nullptr; } /// Returns the current delayed-diagnostics pool. 
sema::DelayedDiagnosticPool *getCurrentPool() const { return CurPool; } /// Enter a new scope. Access and deprecation diagnostics will be /// collected in this pool. DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = &pool; return state; } /// Leave a delayed-diagnostic state that was previously pushed. /// Do not emit any of the diagnostics. This is performed as part /// of the bookkeeping of popping a pool "properly". void popWithoutEmitting(DelayedDiagnosticsState state) { CurPool = state.SavedPool; } /// Enter a new scope where access and deprecation diagnostics are /// not delayed. DelayedDiagnosticsState pushUndelayed() { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = nullptr; return state; } /// Undo a previous pushUndelayed(). void popUndelayed(DelayedDiagnosticsState state) { assert(CurPool == nullptr); CurPool = state.SavedPool; } } DelayedDiagnostics; /// A RAII object to temporarily push a declaration context. class ContextRAII { private: Sema &S; DeclContext *SavedContext; ProcessingContextState SavedContextState; QualType SavedCXXThisTypeOverride; unsigned SavedFunctionScopesStart; unsigned SavedInventedParameterInfosStart; public: ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true) : S(S), SavedContext(S.CurContext), SavedContextState(S.DelayedDiagnostics.pushUndelayed()), SavedCXXThisTypeOverride(S.CXXThisTypeOverride), SavedFunctionScopesStart(S.FunctionScopesStart), SavedInventedParameterInfosStart(S.InventedParameterInfosStart) { assert(ContextToPush && "pushing null context"); S.CurContext = ContextToPush; if (NewThisContext) S.CXXThisTypeOverride = QualType(); // Any saved FunctionScopes do not refer to this context. S.FunctionScopesStart = S.FunctionScopes.size(); S.InventedParameterInfosStart = S.InventedParameterInfos.size(); } void pop() { if (!SavedContext) return; S.CurContext = SavedContext; S.DelayedDiagnostics.popUndelayed(SavedContextState); S.CXXThisTypeOverride = SavedCXXThisTypeOverride; S.FunctionScopesStart = SavedFunctionScopesStart; S.InventedParameterInfosStart = SavedInventedParameterInfosStart; SavedContext = nullptr; } ~ContextRAII() { pop(); } }; /// Whether the AST is currently being rebuilt to correct immediate /// invocations. Immediate invocation candidates and references to consteval /// functions aren't tracked when this is set. bool RebuildingImmediateInvocation = false; /// Used to change context to isConstantEvaluated without pushing a heavy /// ExpressionEvaluationContextRecord object. bool isConstantEvaluatedOverride; bool isConstantEvaluated() { return ExprEvalContexts.back().isConstantEvaluated() || isConstantEvaluatedOverride; } /// RAII object to handle the state changes required to synthesize /// a function body. 
class SynthesizedFunctionScope { Sema &S; Sema::ContextRAII SavedContext; bool PushedCodeSynthesisContext = false; public: SynthesizedFunctionScope(Sema &S, DeclContext *DC) : S(S), SavedContext(S, DC) { S.PushFunctionScope(); S.PushExpressionEvaluationContext( Sema::ExpressionEvaluationContext::PotentiallyEvaluated); if (auto *FD = dyn_cast<FunctionDecl>(DC)) FD->setWillHaveBody(true); else assert(isa<ObjCMethodDecl>(DC)); } void addContextNote(SourceLocation UseLoc) { assert(!PushedCodeSynthesisContext); Sema::CodeSynthesisContext Ctx; Ctx.Kind = Sema::CodeSynthesisContext::DefiningSynthesizedFunction; Ctx.PointOfInstantiation = UseLoc; Ctx.Entity = cast<Decl>(S.CurContext); S.pushCodeSynthesisContext(Ctx); PushedCodeSynthesisContext = true; } ~SynthesizedFunctionScope() { if (PushedCodeSynthesisContext) S.popCodeSynthesisContext(); if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext)) FD->setWillHaveBody(false); S.PopExpressionEvaluationContext(); S.PopFunctionScopeInfo(); } }; /// WeakUndeclaredIdentifiers - Identifiers contained in /// \#pragma weak before declared. rare. may alias another /// identifier, declared or undeclared llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers; /// ExtnameUndeclaredIdentifiers - Identifiers contained in /// \#pragma redefine_extname before declared. Used in Solaris system headers /// to define functions that occur in multiple standards to call the version /// in the currently selected standard. llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers; /// Load weak undeclared identifiers from the external source. void LoadExternalWeakUndeclaredIdentifiers(); /// WeakTopLevelDecl - Translation-unit scoped declarations generated by /// \#pragma weak during processing of other Decls. /// I couldn't figure out a clean way to generate these in-line, so /// we store them here and handle separately -- which is a hack. /// It would be best to refactor this. SmallVector<Decl*,2> WeakTopLevelDecl; IdentifierResolver IdResolver; /// Translation Unit Scope - useful to Objective-C actions that need /// to lookup file scope declarations in the "ordinary" C decl namespace. /// For example, user-defined classes, built-in "id" type, etc. Scope *TUScope; /// The C++ "std" namespace, where the standard library resides. LazyDeclPtr StdNamespace; /// The C++ "std::bad_alloc" class, which is defined by the C++ /// standard library. LazyDeclPtr StdBadAlloc; /// The C++ "std::align_val_t" enum class, which is defined by the C++ /// standard library. LazyDeclPtr StdAlignValT; /// The C++ "std::experimental" namespace, where the experimental parts /// of the standard library resides. NamespaceDecl *StdExperimentalNamespaceCache; /// The C++ "std::initializer_list" template, which is defined in /// \<initializer_list>. ClassTemplateDecl *StdInitializerList; /// The C++ "std::coroutine_traits" template, which is defined in /// \<coroutine_traits> ClassTemplateDecl *StdCoroutineTraitsCache; /// The C++ "type_info" declaration, which is defined in \<typeinfo>. RecordDecl *CXXTypeInfoDecl; /// The MSVC "_GUID" struct, which is defined in MSVC header files. RecordDecl *MSVCGuidDecl; /// Caches identifiers/selectors for NSFoundation APIs. std::unique_ptr<NSAPI> NSAPIObj; /// The declaration of the Objective-C NSNumber class. ObjCInterfaceDecl *NSNumberDecl; /// The declaration of the Objective-C NSValue class. ObjCInterfaceDecl *NSValueDecl; /// Pointer to NSNumber type (NSNumber *). 
QualType NSNumberPointer; /// Pointer to NSValue type (NSValue *). QualType NSValuePointer; /// The Objective-C NSNumber methods used to create NSNumber literals. ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods]; /// The declaration of the Objective-C NSString class. ObjCInterfaceDecl *NSStringDecl; /// Pointer to NSString type (NSString *). QualType NSStringPointer; /// The declaration of the stringWithUTF8String: method. ObjCMethodDecl *StringWithUTF8StringMethod; /// The declaration of the valueWithBytes:objCType: method. ObjCMethodDecl *ValueWithBytesObjCTypeMethod; /// The declaration of the Objective-C NSArray class. ObjCInterfaceDecl *NSArrayDecl; /// The declaration of the arrayWithObjects:count: method. ObjCMethodDecl *ArrayWithObjectsMethod; /// The declaration of the Objective-C NSDictionary class. ObjCInterfaceDecl *NSDictionaryDecl; /// The declaration of the dictionaryWithObjects:forKeys:count: method. ObjCMethodDecl *DictionaryWithObjectsMethod; /// id<NSCopying> type. QualType QIDNSCopying; /// will hold 'respondsToSelector:' Selector RespondsToSelectorSel; /// A flag to remember whether the implicit forms of operator new and delete /// have been declared. bool GlobalNewDeleteDeclared; /// Describes how the expressions currently being parsed are /// evaluated at run-time, if at all. enum class ExpressionEvaluationContext { /// The current expression and its subexpressions occur within an /// unevaluated operand (C++11 [expr]p7), such as the subexpression of /// \c sizeof, where the type of the expression may be significant but /// no code will be generated to evaluate the value of the expression at /// run time. Unevaluated, /// The current expression occurs within a braced-init-list within /// an unevaluated operand. This is mostly like a regular unevaluated /// context, except that we still instantiate constexpr functions that are /// referenced here so that we can perform narrowing checks correctly. UnevaluatedList, /// The current expression occurs within a discarded statement. /// This behaves largely similarly to an unevaluated operand in preventing /// definitions from being required, but not in other ways. DiscardedStatement, /// The current expression occurs within an unevaluated /// operand that unconditionally permits abstract references to /// fields, such as a SIZE operator in MS-style inline assembly. UnevaluatedAbstract, /// The current context is "potentially evaluated" in C++11 terms, /// but the expression is evaluated at compile-time (like the values of /// cases in a switch statement). ConstantEvaluated, /// In addition of being constant evaluated, the current expression /// occurs in an immediate function context - either a consteval function /// or a consteval if function. ImmediateFunctionContext, /// The current expression is potentially evaluated at run time, /// which means that code may be generated to evaluate the value of the /// expression at run time. PotentiallyEvaluated, /// The current expression is potentially evaluated, but any /// declarations referenced inside that expression are only used if /// in fact the current expression is used. /// /// This value is used when parsing default function arguments, for which /// we would like to provide diagnostics (e.g., passing non-POD arguments /// through varargs) but do not want to mark declarations as "referenced" /// until the default argument is used. 
PotentiallyEvaluatedIfUsed }; using ImmediateInvocationCandidate = llvm::PointerIntPair<ConstantExpr *, 1>; /// Data structure used to record current or nested /// expression evaluation contexts. struct ExpressionEvaluationContextRecord { /// The expression evaluation context. ExpressionEvaluationContext Context; /// Whether the enclosing context needed a cleanup. CleanupInfo ParentCleanup; /// The number of active cleanup objects when we entered /// this expression evaluation context. unsigned NumCleanupObjects; /// The number of typos encountered during this expression evaluation /// context (i.e. the number of TypoExprs created). unsigned NumTypos; MaybeODRUseExprSet SavedMaybeODRUseExprs; /// The lambdas that are present within this context, if it /// is indeed an unevaluated context. SmallVector<LambdaExpr *, 2> Lambdas; /// The declaration that provides context for lambda expressions /// and block literals if the normal declaration context does not /// suffice, e.g., in a default function argument. Decl *ManglingContextDecl; /// If we are processing a decltype type, a set of call expressions /// for which we have deferred checking the completeness of the return type. SmallVector<CallExpr *, 8> DelayedDecltypeCalls; /// If we are processing a decltype type, a set of temporary binding /// expressions for which we have deferred checking the destructor. SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds; llvm::SmallPtrSet<const Expr *, 8> PossibleDerefs; /// Expressions appearing as the LHS of a volatile assignment in this /// context. We produce a warning for these when popping the context if /// they are not discarded-value expressions nor unevaluated operands. SmallVector<Expr*, 2> VolatileAssignmentLHSs; /// Set of candidates for starting an immediate invocation. llvm::SmallVector<ImmediateInvocationCandidate, 4> ImmediateInvocationCandidates; /// Set of DeclRefExprs referencing a consteval function when used in a /// context not already known to be immediately invoked. llvm::SmallPtrSet<DeclRefExpr *, 4> ReferenceToConsteval; /// \brief Describes whether we are in an expression constext which we have /// to handle differently. enum ExpressionKind { EK_Decltype, EK_TemplateArgument, EK_Other } ExprContext; ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context, unsigned NumCleanupObjects, CleanupInfo ParentCleanup, Decl *ManglingContextDecl, ExpressionKind ExprContext) : Context(Context), ParentCleanup(ParentCleanup), NumCleanupObjects(NumCleanupObjects), NumTypos(0), ManglingContextDecl(ManglingContextDecl), ExprContext(ExprContext) {} bool isUnevaluated() const { return Context == ExpressionEvaluationContext::Unevaluated || Context == ExpressionEvaluationContext::UnevaluatedAbstract || Context == ExpressionEvaluationContext::UnevaluatedList; } bool isConstantEvaluated() const { return Context == ExpressionEvaluationContext::ConstantEvaluated || Context == ExpressionEvaluationContext::ImmediateFunctionContext; } bool isImmediateFunctionContext() const { return Context == ExpressionEvaluationContext::ImmediateFunctionContext; } }; /// A stack of expression evaluation contexts. SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts; /// Emit a warning for all pending noderef expressions that we recorded. void WarnOnPendingNoDerefs(ExpressionEvaluationContextRecord &Rec); /// Compute the mangling number context for a lambda expression or /// block literal. Also return the extra mangling decl if any. 
  /// Compute the mangling number context for a lambda expression or
  /// block literal. Also return the extra mangling decl if any.
  ///
  /// \param DC - The DeclContext containing the lambda expression or
  /// block literal.
  std::tuple<MangleNumberingContext *, Decl *>
  getCurrentMangleNumberContext(const DeclContext *DC);

  /// SpecialMemberOverloadResult - The overloading result for a special member
  /// function.
  ///
  /// This is basically a wrapper around PointerIntPair. The lowest bits of the
  /// integer are used to determine whether overload resolution succeeded.
  class SpecialMemberOverloadResult {
  public:
    enum Kind {
      NoMemberOrDeleted,
      Ambiguous,
      Success
    };

  private:
    llvm::PointerIntPair<CXXMethodDecl *, 2> Pair;

  public:
    SpecialMemberOverloadResult() : Pair() {}
    SpecialMemberOverloadResult(CXXMethodDecl *MD)
        : Pair(MD, MD->isDeleted() ? NoMemberOrDeleted : Success) {}

    CXXMethodDecl *getMethod() const { return Pair.getPointer(); }
    void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); }

    Kind getKind() const { return static_cast<Kind>(Pair.getInt()); }
    void setKind(Kind K) { Pair.setInt(K); }
  };

  class SpecialMemberOverloadResultEntry : public llvm::FastFoldingSetNode,
                                           public SpecialMemberOverloadResult {
  public:
    SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID)
        : FastFoldingSetNode(ID) {}
  };

  /// A cache of special member function overload resolution results
  /// for C++ records.
  llvm::FoldingSet<SpecialMemberOverloadResultEntry> SpecialMemberCache;

  /// A cache of the flags available in enumerations with the flag_bits
  /// attribute.
  mutable llvm::DenseMap<const EnumDecl *, llvm::APInt> FlagBitsCache;

  /// The kind of translation unit we are processing.
  ///
  /// When we're processing a complete translation unit, Sema will perform
  /// end-of-translation-unit semantic tasks (such as creating
  /// initializers for tentative definitions in C) once parsing has
  /// completed. Modules and precompiled headers perform different kinds of
  /// checks.
  const TranslationUnitKind TUKind;

  llvm::BumpPtrAllocator BumpAlloc;

  /// The number of SFINAE diagnostics that have been trapped.
  unsigned NumSFINAEErrors;

  typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>>
      UnparsedDefaultArgInstantiationsMap;

  /// A mapping from parameters with unparsed default arguments to the
  /// set of instantiations of each parameter.
  ///
  /// This mapping is a temporary data structure used when parsing
  /// nested class templates or nested classes of class templates,
  /// where we might end up instantiating an inner class before the
  /// default arguments of its methods have been parsed.
  UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations;

  // Contains the locations of the beginning of unparsed default
  // argument locations.
  llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs;

  /// UndefinedInternals - all the used, undefined objects which require a
  /// definition in this translation unit.
  llvm::MapVector<NamedDecl *, SourceLocation> UndefinedButUsed;

  /// Determine if VD, which must be a variable or function, is an external
  /// symbol that nonetheless can't be referenced from outside this translation
  /// unit because its type has no linkage and it's not extern "C".
  bool isExternalWithNoLinkageType(ValueDecl *VD);

  /// Obtain a sorted list of functions that are undefined but ODR-used.
  void getUndefinedButUsed(
      SmallVectorImpl<std::pair<NamedDecl *, SourceLocation>> &Undefined);

  /// Retrieves list of suspicious delete-expressions that will be checked at
  /// the end of translation unit.
  const llvm::MapVector<FieldDecl *, DeleteLocs> &
  getMismatchingDeleteExpressions() const;

  class GlobalMethodPool {
  public:
    using Lists = std::pair<ObjCMethodList, ObjCMethodList>;
    using iterator = llvm::DenseMap<Selector, Lists>::iterator;

    iterator begin() { return Methods.begin(); }
    iterator end() { return Methods.end(); }
    iterator find(Selector Sel) { return Methods.find(Sel); }
    std::pair<iterator, bool> insert(std::pair<Selector, Lists> &&Val) {
      return Methods.insert(Val);
    }
    int count(Selector Sel) const { return Methods.count(Sel); }
    bool empty() const { return Methods.empty(); }

  private:
    llvm::DenseMap<Selector, Lists> Methods;
  };

  /// Method Pool - allows efficient lookup when typechecking messages to "id".
  /// We need to maintain a list, since selectors can have differing signatures
  /// across classes. In Cocoa, this happens to be extremely uncommon (only 1%
  /// of selectors are "overloaded").
  /// At the head of the list it is recorded whether there were 0, 1, or >= 2
  /// methods inside categories with a particular selector.
  GlobalMethodPool MethodPool;

  /// Method selectors used in a \@selector expression. Used for implementation
  /// of -Wselector.
  llvm::MapVector<Selector, SourceLocation> ReferencedSelectors;

  /// List of SourceLocations where 'self' is implicitly retained inside a
  /// block.
  llvm::SmallVector<std::pair<SourceLocation, const BlockDecl *>, 1>
      ImplicitlyRetainedSelfLocs;

  /// Kinds of C++ special members.
  enum CXXSpecialMember {
    CXXDefaultConstructor,
    CXXCopyConstructor,
    CXXMoveConstructor,
    CXXCopyAssignment,
    CXXMoveAssignment,
    CXXDestructor,
    CXXInvalid
  };

  typedef llvm::PointerIntPair<CXXRecordDecl *, 3, CXXSpecialMember>
      SpecialMemberDecl;

  /// The C++ special members which we are currently in the process of
  /// declaring. If this process recursively triggers the declaration of the
  /// same special member, we should act as if it is not yet declared.
  llvm::SmallPtrSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared;

  /// Kinds of defaulted comparison operator functions.
  enum class DefaultedComparisonKind : unsigned char {
    /// This is not a defaultable comparison operator.
    None,
    /// This is an operator== that should be implemented as a series of
    /// subobject comparisons.
    Equal,
    /// This is an operator<=> that should be implemented as a series of
    /// subobject comparisons.
    ThreeWay,
    /// This is an operator!= that should be implemented as a rewrite in terms
    /// of a == comparison.
    NotEqual,
    /// This is an <, <=, >, or >= that should be implemented as a rewrite in
    /// terms of a <=> comparison.
    Relational,
  };

  /// The function definitions which were renamed as part of typo-correction
  /// to match their respective declarations. We want to keep track of them
  /// to ensure that we don't emit a "redefinition" error if we encounter a
  /// correctly named definition after the renamed definition.
  llvm::SmallPtrSet<const NamedDecl *, 4> TypoCorrectedFunctionDefinitions;

  /// Stack of types that correspond to the parameter entities that are
  /// currently being copy-initialized. Can be empty.
  llvm::SmallVector<QualType, 4> CurrentParameterCopyTypes;

  void ReadMethodPool(Selector Sel);
  void updateOutOfDateSelector(Selector Sel);

  /// Private Helper predicate to check for 'self'.
  bool isSelfExpr(Expr *RExpr);
  bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method);
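  // Illustrative sketch (an assumption, not original text): code inside Sema
  // might consult the method pool above when typechecking a message sent to
  // 'id'. ReadMethodPool first pulls in methods known to an external source
  // such as a PCH; the pair then holds instance and factory method lists.
  //
  //   ReadMethodPool(Sel);
  //   auto It = MethodPool.find(Sel);
  //   if (It != MethodPool.end()) {
  //     ObjCMethodList &InstanceMethods = It->second.first;
  //     ObjCMethodList &FactoryMethods = It->second.second;
  //     // ... walk both lists looking for a matching signature ...
  //   }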
  /// Cause the active diagnostic on the DiagnosticsEngine to be
  /// emitted. This is closely coupled to the SemaDiagnosticBuilder class and
  /// should not be used elsewhere.
  void EmitCurrentDiagnostic(unsigned DiagID);

  /// Records and restores the CurFPFeatures state on entry/exit of compound
  /// statements.
  class FPFeaturesStateRAII {
  public:
    FPFeaturesStateRAII(Sema &S) : S(S), OldFPFeaturesState(S.CurFPFeatures) {
      OldOverrides = S.FpPragmaStack.CurrentValue;
    }
    ~FPFeaturesStateRAII() {
      S.CurFPFeatures = OldFPFeaturesState;
      S.FpPragmaStack.CurrentValue = OldOverrides;
    }
    FPOptionsOverride getOverrides() { return OldOverrides; }

  private:
    Sema &S;
    FPOptions OldFPFeaturesState;
    FPOptionsOverride OldOverrides;
  };

  void addImplicitTypedef(StringRef Name, QualType T);

  bool WarnedStackExhausted = false;

  /// Increment when we find a reference; decrement when we find an ignored
  /// assignment. Ultimately the value is 0 if every reference is an ignored
  /// assignment.
  llvm::DenseMap<const VarDecl *, int> RefsMinusAssignments;

  Optional<std::unique_ptr<DarwinSDKInfo>> CachedDarwinSDKInfo;

public:
  Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
       TranslationUnitKind TUKind = TU_Complete,
       CodeCompleteConsumer *CompletionConsumer = nullptr);
  ~Sema();

  /// Perform initialization that occurs after the parser has been
  /// initialized but before it parses anything.
  void Initialize();

  /// This virtual key function only exists to limit the emission of debug info
  /// describing the Sema class. GCC and Clang only emit debug info for a class
  /// with a vtable when the vtable is emitted. Sema is final and not
  /// polymorphic, but the debug info size savings are so significant that it
  /// is worth adding a vtable just to take advantage of this optimization.
  virtual void anchor();

  const LangOptions &getLangOpts() const { return LangOpts; }
  OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; }
  FPOptions &getCurFPFeatures() { return CurFPFeatures; }

  DiagnosticsEngine &getDiagnostics() const { return Diags; }
  SourceManager &getSourceManager() const { return SourceMgr; }
  Preprocessor &getPreprocessor() const { return PP; }
  ASTContext &getASTContext() const { return Context; }
  ASTConsumer &getASTConsumer() const { return Consumer; }
  ASTMutationListener *getASTMutationListener() const;
  ExternalSemaSource *getExternalSource() const { return ExternalSource; }

  DarwinSDKInfo *getDarwinSDKInfoForAvailabilityChecking(SourceLocation Loc,
                                                         StringRef Platform);

  /// Registers an external source. If an external source already exists,
  /// creates a multiplex external source and appends to it.
  ///
  /// \param[in] E - A non-null external sema source.
  ///
  void addExternalSource(ExternalSemaSource *E);

  void PrintStats() const;

  /// Warn that the stack is nearly exhausted.
  void warnStackExhausted(SourceLocation Loc);

  /// Run some code with "sufficient" stack space. (Currently, at least 256K is
  /// guaranteed). Produces a warning if we're low on stack space and allocates
  /// more in that case. Use this in code that may recurse deeply (for example,
  /// in template instantiation) to avoid stack overflow.
  void runWithSufficientStackSpace(SourceLocation Loc,
                                   llvm::function_ref<void()> Fn);
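  // Illustrative sketch (assumption): guarding recursion-heavy work with
  // runWithSufficientStackSpace, as the comment above recommends for
  // template instantiation. The recursive helper is hypothetical.
  //
  //   runWithSufficientStackSpace(Loc, [&] {
  //     instantiateDeeplyNestedTemplate(); // may recurse deeply
  //   });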
  /// Helper class that creates diagnostics with optional
  /// template instantiation stacks.
  ///
  /// This class provides a wrapper around the basic DiagnosticBuilder
  /// class that emits diagnostics. ImmediateDiagBuilder is
  /// responsible for emitting the diagnostic (as DiagnosticBuilder
  /// does) and, if the diagnostic comes from inside a template
  /// instantiation, printing the template instantiation stack as
  /// well.
  class ImmediateDiagBuilder : public DiagnosticBuilder {
    Sema &SemaRef;
    unsigned DiagID;

  public:
    ImmediateDiagBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID)
        : DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) {}
    ImmediateDiagBuilder(DiagnosticBuilder &&DB, Sema &SemaRef,
                         unsigned DiagID)
        : DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) {}

    // This is a cunning lie. DiagnosticBuilder actually performs move
    // construction in its copy constructor (but due to varied uses, it's not
    // possible to conveniently express this as actual move construction). So
    // the default copy ctor here is fine, because the base class disables the
    // source anyway, so the user-defined ~ImmediateDiagBuilder is a safe no-op
    // in that case anyway.
    ImmediateDiagBuilder(const ImmediateDiagBuilder &) = default;

    ~ImmediateDiagBuilder() {
      // If we aren't active, there is nothing to do.
      if (!isActive())
        return;

      // Otherwise, we need to emit the diagnostic. First clear the diagnostic
      // builder itself so it won't emit the diagnostic in its own destructor.
      //
      // This seems wasteful, in that as written the DiagnosticBuilder dtor
      // will do its own needless checks to see if the diagnostic needs to be
      // emitted. However, because we take care to ensure that the builder
      // objects never escape, a sufficiently smart compiler will be able to
      // eliminate that code.
      Clear();

      // Dispatch to Sema to emit the diagnostic.
      SemaRef.EmitCurrentDiagnostic(DiagID);
    }

    /// Teach operator<< to produce an object of the correct type.
    template <typename T>
    friend const ImmediateDiagBuilder &
    operator<<(const ImmediateDiagBuilder &Diag, const T &Value) {
      const DiagnosticBuilder &BaseDiag = Diag;
      BaseDiag << Value;
      return Diag;
    }

    // It is necessary to limit this to rvalue reference to avoid calling this
    // function with a bitfield lvalue argument since non-const reference to
    // bitfield is not allowed.
    template <typename T, typename = typename std::enable_if<
                              !std::is_lvalue_reference<T>::value>::type>
    const ImmediateDiagBuilder &operator<<(T &&V) const {
      const DiagnosticBuilder &BaseDiag = *this;
      BaseDiag << std::move(V);
      return *this;
    }
  };

  /// A generic diagnostic builder for errors which may or may not be
  /// deferred.
  ///
  /// In CUDA, there exist constructs (e.g. variable-length arrays, try/catch)
  /// which are not allowed to appear inside __device__ functions and are
  /// allowed to appear in __host__ __device__ functions only if the
  /// host+device function is never codegen'ed.
  ///
  /// To handle this, we use the notion of "deferred diagnostics", where we
  /// attach a diagnostic to a FunctionDecl that's emitted iff it's
  /// codegen'ed.
  ///
  /// This class lets you emit either a regular diagnostic, a deferred
  /// diagnostic, or no diagnostic at all, according to an argument you pass
  /// to its constructor, thus simplifying the process of creating these
  /// "maybe deferred" diagnostics.
  class SemaDiagnosticBuilder {
  public:
    enum Kind {
      /// Emit no diagnostics.
      K_Nop,
      /// Emit the diagnostic immediately (i.e., behave like Sema::Diag()).
      K_Immediate,
      /// Emit the diagnostic immediately, and, if it's a warning or error,
      /// also emit a call stack showing how this function can be reached by
      /// an a priori known-emitted function.
      K_ImmediateWithCallStack,
      /// Create a deferred diagnostic, which is emitted only if the function
      /// it's attached to is codegen'ed. Also emit a call stack as with
      /// K_ImmediateWithCallStack.
      K_Deferred
    };

    SemaDiagnosticBuilder(Kind K, SourceLocation Loc, unsigned DiagID,
                          FunctionDecl *Fn, Sema &S);
    SemaDiagnosticBuilder(SemaDiagnosticBuilder &&D);
    SemaDiagnosticBuilder(const SemaDiagnosticBuilder &) = default;
    ~SemaDiagnosticBuilder();

    bool isImmediate() const { return ImmediateDiag.hasValue(); }

    /// Convertible to bool: True if we immediately emitted an error, false if
    /// we didn't emit an error or we created a deferred error.
    ///
    /// Example usage:
    ///
    ///   if (SemaDiagnosticBuilder(...) << foo << bar)
    ///     return ExprError();
    ///
    /// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you
    /// probably want to use these instead of creating a
    /// SemaDiagnosticBuilder yourself.
    operator bool() const { return isImmediate(); }

    template <typename T>
    friend const SemaDiagnosticBuilder &
    operator<<(const SemaDiagnosticBuilder &Diag, const T &Value) {
      if (Diag.ImmediateDiag.hasValue())
        *Diag.ImmediateDiag << Value;
      else if (Diag.PartialDiagId.hasValue())
        Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second
            << Value;
      return Diag;
    }

    // It is necessary to limit this to rvalue reference to avoid calling this
    // function with a bitfield lvalue argument since non-const reference to
    // bitfield is not allowed.
    template <typename T, typename = typename std::enable_if<
                              !std::is_lvalue_reference<T>::value>::type>
    const SemaDiagnosticBuilder &operator<<(T &&V) const {
      if (ImmediateDiag.hasValue())
        *ImmediateDiag << std::move(V);
      else if (PartialDiagId.hasValue())
        S.DeviceDeferredDiags[Fn][*PartialDiagId].second << std::move(V);
      return *this;
    }

    friend const SemaDiagnosticBuilder &
    operator<<(const SemaDiagnosticBuilder &Diag, const PartialDiagnostic &PD) {
      if (Diag.ImmediateDiag.hasValue())
        PD.Emit(*Diag.ImmediateDiag);
      else if (Diag.PartialDiagId.hasValue())
        Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second = PD;
      return Diag;
    }

    void AddFixItHint(const FixItHint &Hint) const {
      if (ImmediateDiag.hasValue())
        ImmediateDiag->AddFixItHint(Hint);
      else if (PartialDiagId.hasValue())
        S.DeviceDeferredDiags[Fn][*PartialDiagId].second.AddFixItHint(Hint);
    }

    friend ExprResult ExprError(const SemaDiagnosticBuilder &) {
      return ExprError();
    }
    friend StmtResult StmtError(const SemaDiagnosticBuilder &) {
      return StmtError();
    }
    operator ExprResult() const { return ExprError(); }
    operator StmtResult() const { return StmtError(); }
    operator TypeResult() const { return TypeError(); }
    operator DeclResult() const { return DeclResult(true); }
    operator MemInitResult() const { return MemInitResult(true); }

  private:
    Sema &S;
    SourceLocation Loc;
    unsigned DiagID;
    FunctionDecl *Fn;
    bool ShowCallStack;

    // Invariant: At most one of these Optionals has a value.
    // FIXME: Switch these to a Variant once that exists.
    llvm::Optional<ImmediateDiagBuilder> ImmediateDiag;
    llvm::Optional<unsigned> PartialDiagId;
  };

  /// Is the last error level diagnostic immediate. This is used to determine
  /// whether the next info diagnostic should be immediate.
  bool IsLastErrorImmediate = true;

  /// Emit a diagnostic.
  SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID,
                             bool DeferHint = false);

  /// Emit a partial diagnostic.
  SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic &PD,
                             bool DeferHint = false);

  /// Build a partial diagnostic.
  PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h

  /// Whether deferrable diagnostics should be deferred.
  bool DeferDiags = false;
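  // Illustrative sketch (assumption): emitting a possibly-deferred
  // diagnostic. With DeferHint = true and DeferDiags set (see the RAII class
  // below), a deferrable diagnostic is attached to the current function and
  // only surfaces if that function is ever codegen'ed.
  //
  //   Diag(Loc, diag::err_some_deferrable_error, // hypothetical diag ID
  //        /*DeferHint=*/true) << Arg;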
  /// RAII class to control scope of DeferDiags.
  class DeferDiagsRAII {
    Sema &S;
    bool SavedDeferDiags = false;

  public:
    DeferDiagsRAII(Sema &S, bool DeferDiags)
        : S(S), SavedDeferDiags(S.DeferDiags) {
      S.DeferDiags = DeferDiags;
    }
    ~DeferDiagsRAII() { S.DeferDiags = SavedDeferDiags; }
  };

  /// Whether an uncompilable error has occurred. This includes errors that
  /// happen in deferred diagnostics.
  bool hasUncompilableErrorOccurred() const;

  bool findMacroSpelling(SourceLocation &loc, StringRef name);

  /// Get a string to suggest for zero-initialization of a type.
  std::string getFixItZeroInitializerForType(QualType T,
                                             SourceLocation Loc) const;
  std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const;

  /// Calls \c Lexer::getLocForEndOfToken()
  SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0);

  /// Retrieve the module loader associated with the preprocessor.
  ModuleLoader &getModuleLoader() const;

  /// Invent a new identifier for parameters of abbreviated templates.
  IdentifierInfo *
  InventAbbreviatedTemplateParameterTypeName(IdentifierInfo *ParamName,
                                             unsigned Index);

  void emitAndClearUnusedLocalTypedefWarnings();

private:
  /// Function or variable declarations to be checked for whether the deferred
  /// diagnostics should be emitted.
  llvm::SmallSetVector<Decl *, 4> DeclsToCheckForDeferredDiags;

public:
  // Emit all deferred diagnostics.
  void emitDeferredDiags();

  enum TUFragmentKind {
    /// The global module fragment, between 'module;' and a module-declaration.
    Global,
    /// A normal translation unit fragment. For a non-module unit, this is the
    /// entire translation unit. Otherwise, it runs from the module-declaration
    /// to the private-module-fragment (if any) or the end of the TU (if not).
    Normal,
    /// The private module fragment, between 'module :private;' and the end of
    /// the translation unit.
    Private
  };

  void ActOnStartOfTranslationUnit();
  void ActOnEndOfTranslationUnit();
  void ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind);

  void CheckDelegatingCtorCycles();

  Scope *getScopeForContext(DeclContext *Ctx);

  void PushFunctionScope();
  void PushBlockScope(Scope *BlockScope, BlockDecl *Block);
  sema::LambdaScopeInfo *PushLambdaScope();

  /// This is used to inform Sema what the current TemplateParameterDepth
  /// is during Parsing. Currently it is used to pass on the depth
  /// when parsing generic lambda 'auto' parameters.
  void RecordParsingTemplateParameterDepth(unsigned Depth);

  void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD,
                               RecordDecl *RD, CapturedRegionKind K,
                               unsigned OpenMPCaptureLevel = 0);

  /// Custom deleter to allow FunctionScopeInfos to be kept alive for a short
  /// time after they've been popped.
  class PoppedFunctionScopeDeleter {
    Sema *Self;

  public:
    explicit PoppedFunctionScopeDeleter(Sema *Self) : Self(Self) {}
    void operator()(sema::FunctionScopeInfo *Scope) const;
  };

  using PoppedFunctionScopePtr =
      std::unique_ptr<sema::FunctionScopeInfo, PoppedFunctionScopeDeleter>;

  PoppedFunctionScopePtr
  PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr,
                       const Decl *D = nullptr,
                       QualType BlockType = QualType());

  sema::FunctionScopeInfo *getCurFunction() const {
    return FunctionScopes.empty() ? nullptr : FunctionScopes.back();
  }
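  // Illustrative sketch (assumption): the usual pairing of the scope-stack
  // entry points above. PopFunctionScopeInfo returns a unique_ptr whose
  // custom deleter keeps the FunctionScopeInfo alive for a short time after
  // it is popped, so it can still be inspected:
  //
  //   PushFunctionScope();
  //   // ... act on the function body ...
  //   PoppedFunctionScopePtr Popped = PopFunctionScopeInfo();
  //   // Popped remains valid here until the deleter recycles it.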
  sema::FunctionScopeInfo *getEnclosingFunction() const;

  void setFunctionHasBranchIntoScope();
  void setFunctionHasBranchProtectedScope();
  void setFunctionHasIndirectGoto();
  void setFunctionHasMustTail();

  void PushCompoundScope(bool IsStmtExpr);
  void PopCompoundScope();

  sema::CompoundScopeInfo &getCurCompoundScope() const;

  bool hasAnyUnrecoverableErrorsInThisFunction() const;

  /// Retrieve the current block, if any.
  sema::BlockScopeInfo *getCurBlock();

  /// Get the innermost lambda enclosing the current location, if any. This
  /// looks through intervening non-lambda scopes such as local functions and
  /// blocks.
  sema::LambdaScopeInfo *getEnclosingLambda() const;

  /// Retrieve the current lambda scope info, if any.
  /// \param IgnoreNonLambdaCapturingScope true if should find the top-most
  /// lambda scope info ignoring all inner capturing scopes that are not
  /// lambda scopes.
  sema::LambdaScopeInfo *
  getCurLambda(bool IgnoreNonLambdaCapturingScope = false);

  /// Retrieve the current generic lambda info, if any.
  sema::LambdaScopeInfo *getCurGenericLambda();

  /// Retrieve the current captured region, if any.
  sema::CapturedRegionScopeInfo *getCurCapturedRegion();

  /// Retrieve the current function, if any, that should be analyzed for
  /// potential availability violations.
  sema::FunctionScopeInfo *getCurFunctionAvailabilityContext();

  /// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls
  SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; }

  /// Called before parsing a function declarator belonging to a function
  /// declaration.
  void ActOnStartFunctionDeclarationDeclarator(Declarator &D,
                                               unsigned TemplateParameterDepth);

  /// Called after parsing a function declarator belonging to a function
  /// declaration.
  void ActOnFinishFunctionDeclarationDeclarator(Declarator &D);

  void ActOnComment(SourceRange Comment);

  //===--------------------------------------------------------------------===//
  // Type Analysis / Processing: SemaType.cpp.
  //

  QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs,
                              const DeclSpec *DS = nullptr);
  QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA,
                              const DeclSpec *DS = nullptr);
  QualType BuildPointerType(QualType T,
                            SourceLocation Loc, DeclarationName Entity);
  QualType BuildReferenceType(QualType T, bool LValueRef,
                              SourceLocation Loc, DeclarationName Entity);
  QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
                          Expr *ArraySize, unsigned Quals,
                          SourceRange Brackets, DeclarationName Entity);
  QualType BuildVectorType(QualType T, Expr *VecSize, SourceLocation AttrLoc);
  QualType BuildExtVectorType(QualType T, Expr *ArraySize,
                              SourceLocation AttrLoc);
  QualType BuildMatrixType(QualType T, Expr *NumRows, Expr *NumColumns,
                           SourceLocation AttrLoc);

  QualType BuildAddressSpaceAttr(QualType &T, LangAS ASIdx, Expr *AddrSpace,
                                 SourceLocation AttrLoc);

  /// Same as above, but constructs the AddressSpace index if not provided.
  QualType BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace,
                                 SourceLocation AttrLoc);

  bool CheckQualifiedFunctionForTypeId(QualType T, SourceLocation Loc);

  bool CheckFunctionReturnType(QualType T, SourceLocation Loc);
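  // Illustrative sketch (assumption): composing a type with the Build*Type
  // helpers above, e.g. building 'int *' with no particular entity name.
  // A null result means the builder already emitted a diagnostic.
  //
  //   QualType IntPtr =
  //       BuildPointerType(Context.IntTy, Loc, DeclarationName());
  //   if (IntPtr.isNull())
  //     /* error already diagnosed */;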
  /// Build a function type.
  ///
  /// This routine checks the function type according to C++ rules and
  /// under the assumption that the result type and parameter types have
  /// just been instantiated from a template. It therefore duplicates
  /// some of the behavior of GetTypeForDeclarator, but in a much
  /// simpler form that is only suitable for this narrow use case.
  ///
  /// \param T The return type of the function.
  ///
  /// \param ParamTypes The parameter types of the function. This array
  /// will be modified to account for adjustments to the types of the
  /// function parameters.
  ///
  /// \param Loc The location of the entity whose type involves this
  /// function type or, if there is no such entity, the location of the
  /// type that will have function type.
  ///
  /// \param Entity The name of the entity that involves the function
  /// type, if known.
  ///
  /// \param EPI Extra information about the function type. Usually this will
  /// be taken from an existing function with the same prototype.
  ///
  /// \returns A suitable function type, if there are no errors. The
  /// unqualified type will always be a FunctionProtoType.
  /// Otherwise, returns a NULL type.
  QualType BuildFunctionType(QualType T, MutableArrayRef<QualType> ParamTypes,
                             SourceLocation Loc, DeclarationName Entity,
                             const FunctionProtoType::ExtProtoInfo &EPI);

  QualType BuildMemberPointerType(QualType T, QualType Class,
                                  SourceLocation Loc, DeclarationName Entity);
  QualType BuildBlockPointerType(QualType T,
                                 SourceLocation Loc, DeclarationName Entity);
  QualType BuildParenType(QualType T);
  QualType BuildAtomicType(QualType T, SourceLocation Loc);
  QualType BuildReadPipeType(QualType T, SourceLocation Loc);
  QualType BuildWritePipeType(QualType T, SourceLocation Loc);
  QualType BuildExtIntType(bool IsUnsigned, Expr *BitWidth,
                           SourceLocation Loc);

  TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S);
  TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy);

  /// Package the given type and TSI into a ParsedType.
  ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo);
  DeclarationNameInfo GetNameForDeclarator(Declarator &D);
  DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name);
  static QualType GetTypeFromParser(ParsedType Ty,
                                    TypeSourceInfo **TInfo = nullptr);
  CanThrowResult canThrow(const Stmt *E);

  /// Determine whether the callee of a particular function call can throw.
  /// E, D and Loc are all optional.
  static CanThrowResult canCalleeThrow(Sema &S, const Expr *E, const Decl *D,
                                       SourceLocation Loc = SourceLocation());
  const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc,
                                                const FunctionProtoType *FPT);
  void UpdateExceptionSpec(FunctionDecl *FD,
                           const FunctionProtoType::ExceptionSpecInfo &ESI);
  bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range);
  bool CheckDistantExceptionSpec(QualType T);
  bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New);
  bool CheckEquivalentExceptionSpec(
      const FunctionProtoType *Old, SourceLocation OldLoc,
      const FunctionProtoType *New, SourceLocation NewLoc);
  bool CheckEquivalentExceptionSpec(
      const PartialDiagnostic &DiagID, const PartialDiagnostic &NoteID,
      const FunctionProtoType *Old, SourceLocation OldLoc,
      const FunctionProtoType *New, SourceLocation NewLoc);
  bool handlerCanCatch(QualType HandlerType, QualType ExceptionType);
  bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID,
                                const PartialDiagnostic &NestedDiagID,
                                const PartialDiagnostic &NoteID,
                                const PartialDiagnostic &NoThrowDiagID,
                                const FunctionProtoType *Superset,
                                SourceLocation SuperLoc,
                                const FunctionProtoType *Subset,
                                SourceLocation SubLoc);
  bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID,
                               const PartialDiagnostic &NoteID,
                               const FunctionProtoType *Target,
                               SourceLocation TargetLoc,
                               const FunctionProtoType *Source,
                               SourceLocation SourceLoc);

  TypeResult ActOnTypeName(Scope *S, Declarator &D);

  /// The parser has parsed the context-sensitive type 'instancetype'
  /// in an Objective-C message declaration. Return the appropriate type.
  ParsedType ActOnObjCInstanceType(SourceLocation Loc);

  /// Abstract class used to diagnose incomplete types.
  struct TypeDiagnoser {
    TypeDiagnoser() {}

    virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0;
    virtual ~TypeDiagnoser() {}
  };

  static int getPrintable(int I) { return I; }
  static unsigned getPrintable(unsigned I) { return I; }
  static bool getPrintable(bool B) { return B; }
  static const char *getPrintable(const char *S) { return S; }
  static StringRef getPrintable(StringRef S) { return S; }
  static const std::string &getPrintable(const std::string &S) { return S; }
  static const IdentifierInfo *getPrintable(const IdentifierInfo *II) {
    return II;
  }
  static DeclarationName getPrintable(DeclarationName N) { return N; }
  static QualType getPrintable(QualType T) { return T; }
  static SourceRange getPrintable(SourceRange R) { return R; }
  static SourceRange getPrintable(SourceLocation L) { return L; }
  static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); }
  static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange(); }

  template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser {
  protected:
    unsigned DiagID;
    std::tuple<const Ts &...> Args;

    template <std::size_t... Is>
    void emit(const SemaDiagnosticBuilder &DB,
              std::index_sequence<Is...>) const {
      // Apply all tuple elements to the builder in order.
      bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...};
      (void)Dummy;
    }

  public:
    BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args)
        : TypeDiagnoser(), DiagID(DiagID), Args(Args...) {
      assert(DiagID != 0 && "no diagnostic for type diagnoser");
    }

    void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
      const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID);
      emit(DB, std::index_sequence_for<Ts...>());
      DB << T;
    }
  };
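  // Illustrative sketch (assumption): the variadic RequireCompleteType
  // overloads declared below wrap their trailing arguments in a
  // BoundTypeDiagnoser, so a caller can write:
  //
  //   if (RequireCompleteType(Loc, T,
  //                           diag::err_incomplete_type_used, // hypothetical ID
  //                           SomeRange))
  //     return true; // T was incomplete and has been diagnosed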
  /// Do a check to make sure \p Name looks like a legal argument for the
  /// swift_name attribute applied to decl \p D. Raise a diagnostic if the name
  /// is invalid for the given declaration.
  ///
  /// \p AL is used to provide caret diagnostics in case of a malformed name.
  ///
  /// \returns true if the name is a valid swift name for \p D, false
  /// otherwise.
  bool DiagnoseSwiftName(Decl *D, StringRef Name, SourceLocation Loc,
                         const ParsedAttr &AL, bool IsAsync);

  /// A derivative of BoundTypeDiagnoser for which the diagnostic's type
  /// parameter is preceded by a 0/1 enum that is 1 if the type is sizeless.
  /// For example, a diagnostic with no other parameters would generally have
  /// the form "...%select{incomplete|sizeless}0 type %1...".
  template <typename... Ts>
  class SizelessTypeDiagnoser : public BoundTypeDiagnoser<Ts...> {
  public:
    SizelessTypeDiagnoser(unsigned DiagID, const Ts &... Args)
        : BoundTypeDiagnoser<Ts...>(DiagID, Args...) {}

    void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
      const SemaDiagnosticBuilder &DB = S.Diag(Loc, this->DiagID);
      this->emit(DB, std::index_sequence_for<Ts...>());
      DB << T->isSizelessType() << T;
    }
  };

  enum class CompleteTypeKind {
    /// Apply the normal rules for complete types. In particular,
    /// treat all sizeless types as incomplete.
    Normal,

    /// Relax the normal rules for complete types so that they include
    /// sizeless built-in types.
    AcceptSizeless,

    // FIXME: Eventually we should flip the default to Normal and opt in
    // to AcceptSizeless rather than opt out of it.
    Default = AcceptSizeless
  };

private:
  /// Methods for marking which expressions involve dereferencing a pointer
  /// marked with the 'noderef' attribute. Expressions are checked bottom up as
  /// they are parsed, meaning that a noderef pointer may not be accessed. For
  /// example, in `&*p` where `p` is a noderef pointer, we will first parse the
  /// `*p`, but need to check that `address of` is called on it. This requires
  /// keeping a container of all pending expressions and checking if the
  /// address of them are eventually taken.
  void CheckSubscriptAccessOfNoDeref(const ArraySubscriptExpr *E);
  void CheckAddressOfNoDeref(const Expr *E);
  void CheckMemberAccessOfNoDeref(const MemberExpr *E);

  bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
                               CompleteTypeKind Kind, TypeDiagnoser *Diagnoser);

  struct ModuleScope {
    SourceLocation BeginLoc;
    clang::Module *Module = nullptr;
    bool ModuleInterface = false;
    bool ImplicitGlobalModuleFragment = false;
    VisibleModuleSet OuterVisibleModules;
  };
  /// The modules we're currently parsing.
  llvm::SmallVector<ModuleScope, 16> ModuleScopes;

  /// Namespace definitions that we will export when they finish.
  llvm::SmallPtrSet<const NamespaceDecl *, 8> DeferredExportedNamespaces;

  /// Get the module whose scope we are currently within.
  Module *getCurrentModule() const {
    return ModuleScopes.empty() ? nullptr : ModuleScopes.back().Module;
  }

  VisibleModuleSet VisibleModules;

public:
  /// Get the module owning an entity.
  Module *getOwningModule(const Decl *Entity) {
    return Entity->getOwningModule();
  }

  /// Make a merged definition of an existing hidden definition \p ND
  /// visible at the specified location.
  void makeMergedDefinitionVisible(NamedDecl *ND);

  bool isModuleVisible(const Module *M, bool ModulePrivate = false);

  // When loading a non-modular PCH file, this is used to restore module
  // visibility.
  void makeModuleVisible(Module *Mod, SourceLocation ImportLoc) {
    VisibleModules.setVisible(Mod, ImportLoc);
  }

  /// Determine whether a declaration is visible to name lookup.
  bool isVisible(const NamedDecl *D) {
    return D->isUnconditionallyVisible() || isVisibleSlow(D);
  }

  /// Determine whether any declaration of an entity is visible.
  bool
  hasVisibleDeclaration(const NamedDecl *D,
                        llvm::SmallVectorImpl<Module *> *Modules = nullptr) {
    return isVisible(D) || hasVisibleDeclarationSlow(D, Modules);
  }
  bool hasVisibleDeclarationSlow(const NamedDecl *D,
                                 llvm::SmallVectorImpl<Module *> *Modules);

  bool hasVisibleMergedDefinition(NamedDecl *Def);
  bool hasMergedDefinitionInCurrentModule(NamedDecl *Def);

  /// Determine if \p D and \p Suggested have a structurally compatible
  /// layout as described in C11 6.2.7/1.
  bool hasStructuralCompatLayout(Decl *D, Decl *Suggested);

  /// Determine if \p D has a visible definition. If not, suggest a declaration
  /// that should be made visible to expose the definition.
  bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested,
                            bool OnlyNeedComplete = false);
  bool hasVisibleDefinition(const NamedDecl *D) {
    NamedDecl *Hidden;
    return hasVisibleDefinition(const_cast<NamedDecl *>(D), &Hidden);
  }

  /// Determine if the template parameter \p D has a visible default argument.
  bool
  hasVisibleDefaultArgument(const NamedDecl *D,
                            llvm::SmallVectorImpl<Module *> *Modules = nullptr);

  /// Determine if there is a visible declaration of \p D that is an explicit
  /// specialization declaration for a specialization of a template. (For a
  /// member specialization, use hasVisibleMemberSpecialization.)
  bool hasVisibleExplicitSpecialization(
      const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);

  /// Determine if there is a visible declaration of \p D that is a member
  /// specialization declaration (as opposed to an instantiated declaration).
  bool hasVisibleMemberSpecialization(
      const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);

  /// Determine if \p A and \p B are equivalent internal linkage declarations
  /// from different modules, and thus an ambiguity error can be downgraded to
  /// an extension warning.
  bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A,
                                              const NamedDecl *B);
  void diagnoseEquivalentInternalLinkageDeclarations(
      SourceLocation Loc, const NamedDecl *D,
      ArrayRef<const NamedDecl *> Equiv);

  bool isUsualDeallocationFunction(const CXXMethodDecl *FD);

  bool isCompleteType(SourceLocation Loc, QualType T,
                      CompleteTypeKind Kind = CompleteTypeKind::Default) {
    return !RequireCompleteTypeImpl(Loc, T, Kind, nullptr);
  }
  bool RequireCompleteType(SourceLocation Loc, QualType T,
                           CompleteTypeKind Kind, TypeDiagnoser &Diagnoser);
  bool RequireCompleteType(SourceLocation Loc, QualType T,
                           CompleteTypeKind Kind, unsigned DiagID);
  bool RequireCompleteType(SourceLocation Loc, QualType T,
                           TypeDiagnoser &Diagnoser) {
    return RequireCompleteType(Loc, T, CompleteTypeKind::Default, Diagnoser);
  }
  bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID) {
    return RequireCompleteType(Loc, T, CompleteTypeKind::Default, DiagID);
  }

  template <typename... Ts>
  bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID,
                           const Ts &...Args) {
    BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
    return RequireCompleteType(Loc, T, Diagnoser);
  }

  template <typename... Ts>
  bool RequireCompleteSizedType(SourceLocation Loc, QualType T,
                                unsigned DiagID,
                                const Ts &... Args) {
    SizelessTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
    return RequireCompleteType(Loc, T, CompleteTypeKind::Normal, Diagnoser);
  }

  /// Get the type of expression E, triggering instantiation to complete the
  /// type if necessary -- that is, if the expression refers to a templated
  /// static data member of incomplete array type.
  ///
  /// May still return an incomplete type if instantiation was not possible or
  /// if the type is incomplete for a different reason. Use
  /// RequireCompleteExprType instead if a diagnostic is expected for an
  /// incomplete expression type.
  QualType getCompletedType(Expr *E);

  void completeExprArrayBound(Expr *E);
  bool RequireCompleteExprType(Expr *E, CompleteTypeKind Kind,
                               TypeDiagnoser &Diagnoser);
  bool RequireCompleteExprType(Expr *E, unsigned DiagID);

  template <typename... Ts>
  bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) {
    BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
    return RequireCompleteExprType(E, CompleteTypeKind::Default, Diagnoser);
  }

  template <typename... Ts>
  bool RequireCompleteSizedExprType(Expr *E, unsigned DiagID,
                                    const Ts &... Args) {
    SizelessTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
    return RequireCompleteExprType(E, CompleteTypeKind::Normal, Diagnoser);
  }

  bool RequireLiteralType(SourceLocation Loc, QualType T,
                          TypeDiagnoser &Diagnoser);
  bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID);

  template <typename... Ts>
  bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID,
                          const Ts &...Args) {
    BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
    return RequireLiteralType(Loc, T, Diagnoser);
  }

  QualType getElaboratedType(ElaboratedTypeKeyword Keyword,
                             const CXXScopeSpec &SS, QualType T,
                             TagDecl *OwnedTagDecl = nullptr);

  QualType BuildTypeofExprType(Expr *E, SourceLocation Loc);
  /// If AsUnevaluated is false, E is treated as though it were an evaluated
  /// context, such as when building a type for decltype(auto).
  QualType BuildDecltypeType(Expr *E, SourceLocation Loc,
                             bool AsUnevaluated = true);
  QualType BuildUnaryTransformType(QualType BaseType,
                                   UnaryTransformType::UTTKind UKind,
                                   SourceLocation Loc);

  //===--------------------------------------------------------------------===//
  // Symbol table / Decl tracking callbacks: SemaDecl.cpp.
  //

  struct SkipBodyInfo {
    SkipBodyInfo()
        : ShouldSkip(false), CheckSameAsPrevious(false), Previous(nullptr),
          New(nullptr) {}
    bool ShouldSkip;
    bool CheckSameAsPrevious;
    NamedDecl *Previous;
    NamedDecl *New;
  };

  DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr);

  void DiagnoseUseOfUnimplementedSelectors();

  bool isSimpleTypeSpecifier(tok::TokenKind Kind) const;

  ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
                         Scope *S, CXXScopeSpec *SS = nullptr,
                         bool isClassName = false, bool HasTrailingDot = false,
                         ParsedType ObjectType = nullptr,
                         bool IsCtorOrDtorName = false,
                         bool WantNontrivialTypeSourceInfo = false,
                         bool IsClassTemplateDeductionContext = true,
                         IdentifierInfo **CorrectedII = nullptr);
  TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S);
  bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S);
  void DiagnoseUnknownTypeName(IdentifierInfo *&II, SourceLocation IILoc,
                               Scope *S, CXXScopeSpec *SS,
                               ParsedType &SuggestedType,
                               bool IsTemplateName = false);

  /// Attempt to behave like MSVC in situations where lookup of an unqualified
  /// type name has failed in a dependent context.
  /// In these situations, we
  /// automatically form a DependentTypeName that will retry lookup in a
  /// related scope during instantiation.
  ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II,
                                      SourceLocation NameLoc,
                                      bool IsTemplateTypeArg);

  /// Describes the result of the name lookup and resolution performed
  /// by \c ClassifyName().
  enum NameClassificationKind {
    /// This name is not a type or template in this context, but might be
    /// something else.
    NC_Unknown,

    /// Classification failed; an error has been produced.
    NC_Error,

    /// The name has been typo-corrected to a keyword.
    NC_Keyword,

    /// The name was classified as a type.
    NC_Type,

    /// The name was classified as a specific non-type, non-template
    /// declaration. ActOnNameClassifiedAsNonType should be called to
    /// convert the declaration to an expression.
    NC_NonType,

    /// The name was classified as an ADL-only function name.
    /// ActOnNameClassifiedAsUndeclaredNonType should be called to convert the
    /// result to an expression.
    NC_UndeclaredNonType,

    /// The name denotes a member of a dependent type that could not be
    /// resolved. ActOnNameClassifiedAsDependentNonType should be called to
    /// convert the result to an expression.
    NC_DependentNonType,

    /// The name was classified as an overload set, and an expression
    /// representing that overload set has been formed.
    /// ActOnNameClassifiedAsOverloadSet should be called to form a suitable
    /// expression referencing the overload set.
    NC_OverloadSet,

    /// The name was classified as a template whose specializations are types.
    NC_TypeTemplate,

    /// The name was classified as a variable template name.
    NC_VarTemplate,

    /// The name was classified as a function template name.
    NC_FunctionTemplate,

    /// The name was classified as an ADL-only function template name.
    NC_UndeclaredTemplate,

    /// The name was classified as a concept name.
    NC_Concept,
  };

  class NameClassification {
    NameClassificationKind Kind;
    union {
      ExprResult Expr;
      NamedDecl *NonTypeDecl;
      TemplateName Template;
      ParsedType Type;
    };

    explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {}

  public:
    NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {}

    NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword) {}

    static NameClassification Error() { return NameClassification(NC_Error); }

    static NameClassification Unknown() {
      return NameClassification(NC_Unknown);
    }

    static NameClassification OverloadSet(ExprResult E) {
      NameClassification Result(NC_OverloadSet);
      Result.Expr = E;
      return Result;
    }

    static NameClassification NonType(NamedDecl *D) {
      NameClassification Result(NC_NonType);
      Result.NonTypeDecl = D;
      return Result;
    }

    static NameClassification UndeclaredNonType() {
      return NameClassification(NC_UndeclaredNonType);
    }

    static NameClassification DependentNonType() {
      return NameClassification(NC_DependentNonType);
    }

    static NameClassification TypeTemplate(TemplateName Name) {
      NameClassification Result(NC_TypeTemplate);
      Result.Template = Name;
      return Result;
    }

    static NameClassification VarTemplate(TemplateName Name) {
      NameClassification Result(NC_VarTemplate);
      Result.Template = Name;
      return Result;
    }

    static NameClassification FunctionTemplate(TemplateName Name) {
      NameClassification Result(NC_FunctionTemplate);
      Result.Template = Name;
      return Result;
    }

    static NameClassification Concept(TemplateName Name) {
      NameClassification Result(NC_Concept);
      Result.Template = Name;
      return Result;
    }

    static NameClassification UndeclaredTemplate(TemplateName Name) {
      NameClassification Result(NC_UndeclaredTemplate);
      Result.Template = Name;
      return Result;
    }

    NameClassificationKind getKind() const { return Kind; }

    ExprResult getExpression() const {
      assert(Kind == NC_OverloadSet);
      return Expr;
    }

    ParsedType getType() const {
      assert(Kind == NC_Type);
      return Type;
    }

    NamedDecl *getNonTypeDecl() const {
      assert(Kind == NC_NonType);
      return NonTypeDecl;
    }

    TemplateName getTemplateName() const {
      assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate ||
             Kind == NC_VarTemplate || Kind == NC_Concept ||
             Kind == NC_UndeclaredTemplate);
      return Template;
    }

    TemplateNameKind getTemplateNameKind() const {
      switch (Kind) {
      case NC_TypeTemplate:
        return TNK_Type_template;
      case NC_FunctionTemplate:
        return TNK_Function_template;
      case NC_VarTemplate:
        return TNK_Var_template;
      case NC_Concept:
        return TNK_Concept_template;
      case NC_UndeclaredTemplate:
        return TNK_Undeclared_template;
      default:
        llvm_unreachable("unsupported name classification.");
      }
    }
  };
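  // Illustrative sketch (assumption): how a parser might dispatch on the
  // result of ClassifyName (declared just below). The consume* helpers are
  // hypothetical.
  //
  //   NameClassification NC =
  //       ClassifyName(getCurScope(), SS, Name, NameLoc, NextToken);
  //   switch (NC.getKind()) {
  //   case NC_Type:    consumeType(NC.getType()); break;
  //   case NC_NonType: consumeDecl(NC.getNonTypeDecl()); break;
  //   default: break;
  //   }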
  /// Perform name lookup on the given name, classifying it based on
  /// the results of name lookup and the following token.
  ///
  /// This routine is used by the parser to resolve identifiers and help direct
  /// parsing. When the identifier cannot be found, this routine will attempt
  /// to correct the typo and classify based on the resulting name.
  ///
  /// \param S The scope in which we're performing name lookup.
  ///
  /// \param SS The nested-name-specifier that precedes the name.
  ///
  /// \param Name The identifier. If typo correction finds an alternative name,
  /// this pointer parameter will be updated accordingly.
  ///
  /// \param NameLoc The location of the identifier.
  ///
  /// \param NextToken The token following the identifier. Used to help
  /// disambiguate the name.
  ///
  /// \param CCC The correction callback, if typo correction is desired.
  NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS,
                                  IdentifierInfo *&Name, SourceLocation NameLoc,
                                  const Token &NextToken,
                                  CorrectionCandidateCallback *CCC = nullptr);

  /// Act on the result of classifying a name as an undeclared (ADL-only)
  /// non-type declaration.
  ExprResult ActOnNameClassifiedAsUndeclaredNonType(IdentifierInfo *Name,
                                                    SourceLocation NameLoc);
  /// Act on the result of classifying a name as an undeclared member of a
  /// dependent base class.
  ExprResult ActOnNameClassifiedAsDependentNonType(const CXXScopeSpec &SS,
                                                   IdentifierInfo *Name,
                                                   SourceLocation NameLoc,
                                                   bool IsAddressOfOperand);
  /// Act on the result of classifying a name as a specific non-type
  /// declaration.
  ExprResult ActOnNameClassifiedAsNonType(Scope *S, const CXXScopeSpec &SS,
                                          NamedDecl *Found,
                                          SourceLocation NameLoc,
                                          const Token &NextToken);
  /// Act on the result of classifying a name as an overload set.
  ExprResult ActOnNameClassifiedAsOverloadSet(Scope *S, Expr *OverloadSet);

  /// Describes the detailed kind of a template name. Used in diagnostics.
  enum class TemplateNameKindForDiagnostics {
    ClassTemplate,
    FunctionTemplate,
    VarTemplate,
    AliasTemplate,
    TemplateTemplateParam,
    Concept,
    DependentTemplate
  };
  TemplateNameKindForDiagnostics
  getTemplateNameKindForDiagnostics(TemplateName Name);

  /// Determine whether it's plausible that E was intended to be a
  /// template-name.
  bool mightBeIntendedToBeTemplateName(ExprResult E, bool &Dependent) {
    if (!getLangOpts().CPlusPlus || E.isInvalid())
      return false;
    Dependent = false;
    if (auto *DRE = dyn_cast<DeclRefExpr>(E.get()))
      return !DRE->hasExplicitTemplateArgs();
    if (auto *ME = dyn_cast<MemberExpr>(E.get()))
      return !ME->hasExplicitTemplateArgs();
    Dependent = true;
    if (auto *DSDRE = dyn_cast<DependentScopeDeclRefExpr>(E.get()))
      return !DSDRE->hasExplicitTemplateArgs();
    if (auto *DSME = dyn_cast<CXXDependentScopeMemberExpr>(E.get()))
      return !DSME->hasExplicitTemplateArgs();
    // Any additional cases recognized here should also be handled by
    // diagnoseExprIntendedAsTemplateName.
    return false;
  }

  void diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName,
                                          SourceLocation Less,
                                          SourceLocation Greater);

  void warnOnReservedIdentifier(const NamedDecl *D);

  Decl *ActOnDeclarator(Scope *S, Declarator &D);

  NamedDecl *HandleDeclarator(Scope *S, Declarator &D,
                              MultiTemplateParamsArg TemplateParameterLists);
  bool tryToFixVariablyModifiedVarType(TypeSourceInfo *&TInfo, QualType &T,
                                       SourceLocation Loc,
                                       unsigned FailedFoldDiagID);
  void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S);
  bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info);
  bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
                                    DeclarationName Name, SourceLocation Loc,
                                    bool IsTemplateId);
  void
  diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals,
                            SourceLocation FallbackLoc,
                            SourceLocation ConstQualLoc = SourceLocation(),
                            SourceLocation VolatileQualLoc = SourceLocation(),
                            SourceLocation RestrictQualLoc = SourceLocation(),
                            SourceLocation AtomicQualLoc = SourceLocation(),
                            SourceLocation UnalignedQualLoc = SourceLocation());

  void diagnosePointerAuthDisabled(SourceLocation loc, SourceRange range);
  bool checkConstantPointerAuthKey(Expr *keyExpr, unsigned &key);

  static bool adjustContextForLocalExternDecl(DeclContext *&DC);
  void DiagnoseFunctionSpecifiers(const DeclSpec &DS);
  NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D,
                                    const LookupResult &R);
  NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R);
  NamedDecl *getShadowedDeclaration(const BindingDecl *D,
                                    const LookupResult &R);
  void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl,
                   const LookupResult &R);
  void CheckShadow(Scope *S, VarDecl *D);

  /// Warn if 'E', which is an expression that is about to be modified, refers
  /// to a shadowing declaration.
  void CheckShadowingDeclModification(Expr *E, SourceLocation Loc);

  void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI);

private:
  /// Map of current shadowing declarations to shadowed declarations. Warn if
  /// it looks like the user is trying to modify the shadowing declaration.
  llvm::DenseMap<const NamedDecl *, const NamedDecl *> ShadowingDecls;

public:
  void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange);
  void handleTagNumbering(const TagDecl *Tag, Scope *TagScope);
  void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec,
                                    TypedefNameDecl *NewTD);
  void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D);
  NamedDecl *ActOnTypedefDeclarator(Scope *S, Declarator &D, DeclContext *DC,
                                    TypeSourceInfo *TInfo,
                                    LookupResult &Previous);
  NamedDecl *ActOnTypedefNameDecl(Scope *S, DeclContext *DC,
                                  TypedefNameDecl *D, LookupResult &Previous,
                                  bool &Redeclaration);
  NamedDecl *ActOnVariableDeclarator(
      Scope *S, Declarator &D, DeclContext *DC, TypeSourceInfo *TInfo,
      LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists,
      bool &AddToScope, ArrayRef<BindingDecl *> Bindings = None);
  NamedDecl *
  ActOnDecompositionDeclarator(Scope *S, Declarator &D,
                               MultiTemplateParamsArg TemplateParamLists);
  // Returns true if the variable declaration is a redeclaration
  bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous);
  void CheckVariableDeclarationType(VarDecl *NewVD);
  bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit,
                                     Expr *Init);
  void CheckCompleteVariableDeclaration(VarDecl *VD);
  void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD);
  void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D);

  NamedDecl *ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
                                     TypeSourceInfo *TInfo,
                                     LookupResult &Previous,
                                     MultiTemplateParamsArg TemplateParamLists,
                                     bool &AddToScope);
  bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD);

  enum class CheckConstexprKind {
    /// Diagnose issues that are non-constant or that are extensions.
    Diagnose,
    /// Identify whether this function satisfies the formal rules for constexpr
    /// functions in the current language mode (with no extensions).
    CheckValid
  };

  bool CheckConstexprFunctionDefinition(const FunctionDecl *FD,
                                        CheckConstexprKind Kind);

  void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD);
  void
  FindHiddenVirtualMethods(CXXMethodDecl *MD,
                           SmallVectorImpl<CXXMethodDecl *> &OverloadedMethods);
  void
  NoteHiddenVirtualMethods(CXXMethodDecl *MD,
                           SmallVectorImpl<CXXMethodDecl *> &OverloadedMethods);
  // Returns true if the function declaration is a redeclaration
  bool CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD,
                                LookupResult &Previous,
                                bool IsMemberSpecialization);
  bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl);
  bool canFullyTypeCheckRedeclaration(ValueDecl *NewD, ValueDecl *OldD,
                                      QualType NewT, QualType OldT);
  void CheckMain(FunctionDecl *FD, const DeclSpec &D);
  void CheckMSVCRTEntryPoint(FunctionDecl *FD);
  Attr *getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD,
                                                   bool IsDefinition);
  void CheckFunctionOrTemplateParamDeclarator(Scope *S, Declarator &D);
  Decl *ActOnParamDeclarator(Scope *S, Declarator &D);
  ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC, SourceLocation Loc,
                                          QualType T);
  QualType adjustParameterTypeForObjCAutoRefCount(QualType T,
                                                  SourceLocation NameLoc,
                                                  TypeSourceInfo *TSInfo);
  ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc,
                              SourceLocation NameLoc, IdentifierInfo *Name,
                              QualType T, TypeSourceInfo *TSInfo,
                              StorageClass SC);
  void ActOnParamDefaultArgument(Decl *param, SourceLocation EqualLoc,
                                 Expr *defarg);
  void ActOnParamUnparsedDefaultArgument(Decl *param, SourceLocation EqualLoc,
                                         SourceLocation ArgLoc);
  void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc);
  ExprResult ConvertParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg,
                                         SourceLocation EqualLoc);
  void SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg,
                               SourceLocation EqualLoc);

  // Contexts where using non-trivial C union types can be disallowed. This is
  // passed to err_non_trivial_c_union_in_invalid_context.
  enum NonTrivialCUnionContext {
    // Function parameter.
    NTCUC_FunctionParam,
    // Function return.
    NTCUC_FunctionReturn,
    // Default-initialized object.
    NTCUC_DefaultInitializedObject,
    // Variable with automatic storage duration.
    NTCUC_AutoVar,
    // Initializer expression that might copy from another object.
    NTCUC_CopyInit,
    // Assignment.
    NTCUC_Assignment,
    // Compound literal.
    NTCUC_CompoundLiteral,
    // Block capture.
    NTCUC_BlockCapture,
    // lvalue-to-rvalue conversion of volatile type.
    NTCUC_LValueToRValueVolatile,
  };

  /// Emit diagnostics if the initializer or any of its explicit or
  /// implicitly-generated subexpressions require copying or
  /// default-initializing a type that is or contains a C union type that is
  /// non-trivial to copy or default-initialize.
  void checkNonTrivialCUnionInInitializer(const Expr *Init, SourceLocation Loc);

  // These flags are passed to checkNonTrivialCUnion.
  enum NonTrivialCUnionKind {
    NTCUK_Init = 0x1,
    NTCUK_Destruct = 0x2,
    NTCUK_Copy = 0x4,
  };
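  // Illustrative sketch (assumption): the NTCUK_* values form a bitmask, so a
  // caller checking an object that is both default-initialized and later
  // copied would pass the union of the relevant kinds to
  // checkNonTrivialCUnion (declared just below):
  //
  //   checkNonTrivialCUnion(QT, Loc, NTCUC_DefaultInitializedObject,
  //                         NTCUK_Init | NTCUK_Copy);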
  /// Emit diagnostics if a non-trivial C union type or a struct that contains
  /// a non-trivial C union is used in an invalid context.
  void checkNonTrivialCUnion(QualType QT, SourceLocation Loc,
                             NonTrivialCUnionContext UseContext,
                             unsigned NonTrivialKind);

  void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit);
  void ActOnUninitializedDecl(Decl *dcl);
  void ActOnInitializerError(Decl *Dcl);

  void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc);
  void ActOnCXXForRangeDecl(Decl *D);
  StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc,
                                        IdentifierInfo *Ident,
                                        ParsedAttributes &Attrs,
                                        SourceLocation AttrEnd);
  void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc);
  void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc);
  void CheckStaticLocalForDllExport(VarDecl *VD);
  void FinalizeDeclaration(Decl *D);
  DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS,
                                         ArrayRef<Decl *> Group);
  DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group);

  /// Should be called on all declarations that might have attached
  /// documentation comments.
  void ActOnDocumentableDecl(Decl *D);
  void ActOnDocumentableDecls(ArrayRef<Decl *> Group);

  void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D,
                                       SourceLocation LocAfterDecls);
  void CheckForFunctionRedefinition(
      FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr,
      SkipBodyInfo *SkipBody = nullptr);
  Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D,
                                MultiTemplateParamsArg TemplateParamLists,
                                SkipBodyInfo *SkipBody = nullptr);
  Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D,
                                SkipBodyInfo *SkipBody = nullptr);

  void ActOnStartTrailingRequiresClause(Scope *S, Declarator &D);
  ExprResult ActOnFinishTrailingRequiresClause(ExprResult ConstraintExpr);
  ExprResult ActOnRequiresClause(ExprResult ConstraintExpr);

  void ActOnStartOfObjCMethodDef(Scope *S, Decl *D);
  bool isObjCMethodDecl(Decl *D) { return D && isa<ObjCMethodDecl>(D); }

  /// Determine whether we can delay parsing the body of a function or
  /// function template until it is used, assuming we don't care about emitting
  /// code for that function.
  ///
  /// This will be \c false if we may need the body of the function in the
  /// middle of parsing an expression (where it's impractical to switch to
  /// parsing a different function), for instance, if it's constexpr in C++11
  /// or has an 'auto' return type in C++14. These cases are essentially bugs.
  bool canDelayFunctionBody(const Declarator &D);

  /// Determine whether we can skip parsing the body of a function
  /// definition, assuming we don't care about analyzing its body or emitting
  /// code for that function.
  ///
  /// This will be \c false only if we may need the body of the function in
  /// order to parse the rest of the program (for instance, if it is
  /// \c constexpr in C++11 or has an 'auto' return type in C++14).
  bool canSkipFunctionBody(Decl *D);

  void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope);
  Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body);
  Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation);
  Decl *ActOnSkippedFunctionBody(Decl *Decl);
  void ActOnFinishInlineFunctionDef(FunctionDecl *D);

  /// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an
  /// attribute for which parsing is delayed.
  void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs);

  /// Diagnose any unused parameters in the given sequence of
  /// ParmVarDecl pointers.
void DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters);

/// Diagnose whether the size of parameters or return value of a
/// function or obj-c method definition is pass-by-value and larger than a
/// specified threshold.
void
DiagnoseSizeOfParametersAndReturnValue(ArrayRef<ParmVarDecl *> Parameters,
                                       QualType ReturnTy, NamedDecl *D);

void DiagnoseInvalidJumps(Stmt *Body);
Decl *ActOnFileScopeAsmDecl(Expr *expr, SourceLocation AsmLoc,
                            SourceLocation RParenLoc);

/// Handle a C++11 empty-declaration and attribute-declaration.
Decl *ActOnEmptyDeclaration(Scope *S, const ParsedAttributesView &AttrList,
                            SourceLocation SemiLoc);

enum class ModuleDeclKind {
  Interface,      ///< 'export module X;'
  Implementation, ///< 'module X;'
};

/// The parser has processed a module-declaration that begins the definition
/// of a module interface or implementation.
DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc,
                               SourceLocation ModuleLoc, ModuleDeclKind MDK,
                               ModuleIdPath Path, bool IsFirstDecl);

/// The parser has processed a global-module-fragment declaration that begins
/// the definition of the global module fragment of the current module unit.
/// \param ModuleLoc The location of the 'module' keyword.
DeclGroupPtrTy ActOnGlobalModuleFragmentDecl(SourceLocation ModuleLoc);

/// The parser has processed a private-module-fragment declaration that begins
/// the definition of the private module fragment of the current module unit.
/// \param ModuleLoc The location of the 'module' keyword.
/// \param PrivateLoc The location of the 'private' keyword.
DeclGroupPtrTy ActOnPrivateModuleFragmentDecl(SourceLocation ModuleLoc,
                                              SourceLocation PrivateLoc);

/// The parser has processed a module import declaration.
///
/// \param StartLoc The location of the first token in the declaration. This
///        could be the location of an '@', 'export', or 'import'.
/// \param ExportLoc The location of the 'export' keyword, if any.
/// \param ImportLoc The location of the 'import' keyword.
/// \param Path The module access path.
DeclResult ActOnModuleImport(SourceLocation StartLoc,
                             SourceLocation ExportLoc,
                             SourceLocation ImportLoc, ModuleIdPath Path);
DeclResult ActOnModuleImport(SourceLocation StartLoc,
                             SourceLocation ExportLoc,
                             SourceLocation ImportLoc, Module *M,
                             ModuleIdPath Path = {});

/// The parser has processed a module import translated from a
/// #include or similar preprocessing directive.
void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod);

/// The parser has entered a submodule.
void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod);

/// The parser has left a submodule.
void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod);

/// Create an implicit import of the given module at the given
/// source location, for error recovery, if possible.
///
/// This routine is typically used when an entity found by name lookup
/// is actually hidden within a module that we know about but the user
/// has forgotten to import.
void createImplicitModuleImportForErrorRecovery(SourceLocation Loc,
                                                Module *Mod);

/// Kinds of missing import. Note, the values of these enumerators correspond
/// to %select values in diagnostics.
enum class MissingImportKind {
  Declaration,
  Definition,
  DefaultArgument,
  ExplicitSpecialization,
  PartialSpecialization
};

/// Diagnose that the specified declaration needs to be visible but
/// isn't, and suggest a module import that would resolve the problem.
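///
/// A sketch of the situation this handles (module and type names are
/// hypothetical):
/// \code
///   // module 'Lib' declares 'struct Widget' but has not been imported:
///   Widget w; // error: declaration of 'Widget' must be imported from
///             // module 'Lib' before it is required
/// \endcode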
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl, MissingImportKind MIK, bool Recover = true); void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl, SourceLocation DeclLoc, ArrayRef<Module *> Modules, MissingImportKind MIK, bool Recover); Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc, SourceLocation LBraceLoc); Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl, SourceLocation RBraceLoc); /// We've found a use of a templated declaration that would trigger an /// implicit instantiation. Check that any relevant explicit specializations /// and partial specializations are visible, and diagnose if not. void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec); /// Retrieve a suitable printing policy for diagnostics. PrintingPolicy getPrintingPolicy() const { return getPrintingPolicy(Context, PP); } /// Retrieve a suitable printing policy for diagnostics. static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx, const Preprocessor &PP); /// Scope actions. void ActOnPopScope(SourceLocation Loc, Scope *S); void ActOnTranslationUnitScope(Scope *S); Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS, RecordDecl *&AnonRecord); Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS, MultiTemplateParamsArg TemplateParams, bool IsExplicitInstantiation, RecordDecl *&AnonRecord); Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS, AccessSpecifier AS, RecordDecl *Record, const PrintingPolicy &Policy); Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS, RecordDecl *Record); /// Common ways to introduce type names without a tag for use in diagnostics. /// Keep in sync with err_tag_reference_non_tag. enum NonTagKind { NTK_NonStruct, NTK_NonClass, NTK_NonUnion, NTK_NonEnum, NTK_Typedef, NTK_TypeAlias, NTK_Template, NTK_TypeAliasTemplate, NTK_TemplateTemplateArgument, }; /// Given a non-tag type declaration, returns an enum useful for indicating /// what kind of non-tag type this is. 
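///
/// For instance (illustrative), referencing a type alias with a tag:
/// \code
///   using T = int;
///   struct T *P; // error: 'T' refers to a type alias (NTK_TypeAlias),
///                // not a struct; see err_tag_reference_non_tag
/// \endcode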
NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK); bool isAcceptableTagRedeclaration(const TagDecl *Previous, TagTypeKind NewTag, bool isDefinition, SourceLocation NewTagLoc, const IdentifierInfo *Name); enum TagUseKind { TUK_Reference, // Reference to a tag: 'struct foo *X;' TUK_Declaration, // Fwd decl of a tag: 'struct foo;' TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;' TUK_Friend // Friend declaration: 'friend struct foo;' }; Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, AccessSpecifier AS, SourceLocation ModulePrivateLoc, MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl, bool &IsDependent, SourceLocation ScopedEnumKWLoc, bool ScopedEnumUsesClassTag, TypeResult UnderlyingType, bool IsTypeSpecifier, bool IsTemplateParamOrArg, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc, unsigned TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, MultiTemplateParamsArg TempParamLists); TypeResult ActOnDependentTag(Scope *S, unsigned TagSpec, TagUseKind TUK, const CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation TagLoc, SourceLocation NameLoc); void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart, IdentifierInfo *ClassName, SmallVectorImpl<Decl *> &Decls); Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth); FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, InClassInitStyle InitStyle, AccessSpecifier AS); MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, InClassInitStyle InitStyle, AccessSpecifier AS, const ParsedAttr &MSPropertyAttr); FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T, TypeSourceInfo *TInfo, RecordDecl *Record, SourceLocation Loc, bool Mutable, Expr *BitfieldWidth, InClassInitStyle InitStyle, SourceLocation TSSL, AccessSpecifier AS, NamedDecl *PrevDecl, Declarator *D = nullptr); bool CheckNontrivialField(FieldDecl *FD); void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM); enum TrivialABIHandling { /// The triviality of a method unaffected by "trivial_abi". TAH_IgnoreTrivialABI, /// The triviality of a method affected by "trivial_abi". TAH_ConsiderTrivialABI }; bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM, TrivialABIHandling TAH = TAH_IgnoreTrivialABI, bool Diagnose = false); /// For a defaulted function, the kind of defaulted function that it is. 
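///
/// Both flavors in one sketch (illustrative):
/// \code
///   struct S {
///     S() = default;                              // defaulted special member
///     bool operator==(const S &) const = default; // defaulted comparison (C++20)
///   };
/// \endcode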
class DefaultedFunctionKind { CXXSpecialMember SpecialMember : 8; DefaultedComparisonKind Comparison : 8; public: DefaultedFunctionKind() : SpecialMember(CXXInvalid), Comparison(DefaultedComparisonKind::None) { } DefaultedFunctionKind(CXXSpecialMember CSM) : SpecialMember(CSM), Comparison(DefaultedComparisonKind::None) {} DefaultedFunctionKind(DefaultedComparisonKind Comp) : SpecialMember(CXXInvalid), Comparison(Comp) {} bool isSpecialMember() const { return SpecialMember != CXXInvalid; } bool isComparison() const { return Comparison != DefaultedComparisonKind::None; } explicit operator bool() const { return isSpecialMember() || isComparison(); } CXXSpecialMember asSpecialMember() const { return SpecialMember; } DefaultedComparisonKind asComparison() const { return Comparison; } /// Get the index of this function kind for use in diagnostics. unsigned getDiagnosticIndex() const { static_assert(CXXInvalid > CXXDestructor, "invalid should have highest index"); static_assert((unsigned)DefaultedComparisonKind::None == 0, "none should be equal to zero"); return SpecialMember + (unsigned)Comparison; } }; DefaultedFunctionKind getDefaultedFunctionKind(const FunctionDecl *FD); CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD) { return getDefaultedFunctionKind(MD).asSpecialMember(); } DefaultedComparisonKind getDefaultedComparisonKind(const FunctionDecl *FD) { return getDefaultedFunctionKind(FD).asComparison(); } void ActOnLastBitfield(SourceLocation DeclStart, SmallVectorImpl<Decl *> &AllIvarDecls); Decl *ActOnIvar(Scope *S, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, tok::ObjCKeywordKind visibility); // This is used for both record definitions and ObjC interface declarations. void ActOnFields(Scope *S, SourceLocation RecLoc, Decl *TagDecl, ArrayRef<Decl *> Fields, SourceLocation LBrac, SourceLocation RBrac, const ParsedAttributesView &AttrList); /// ActOnTagStartDefinition - Invoked when we have entered the /// scope of a tag's definition (e.g., for an enumeration, class, /// struct, or union). void ActOnTagStartDefinition(Scope *S, Decl *TagDecl); /// Perform ODR-like check for C/ObjC when merging tag types from modules. /// Differently from C++, actually parse the body and reject / error out /// in case of a structural mismatch. bool ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev, SkipBodyInfo &SkipBody); typedef void *SkippedDefinitionContext; /// Invoked when we enter a tag definition that we're skipping. SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD); Decl *ActOnObjCContainerStartDefinition(Decl *IDecl); /// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a /// C++ record definition's base-specifiers clause and are starting its /// member declarations. void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl, SourceLocation FinalLoc, bool IsFinalSpelledSealed, bool IsAbstract, SourceLocation LBraceLoc); /// ActOnTagFinishDefinition - Invoked once we have finished parsing /// the definition of a tag (enumeration, class, struct, or union). void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl, SourceRange BraceRange); void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context); void ActOnObjCContainerFinishDefinition(); /// Invoked when we must temporarily exit the objective-c container /// scope for parsing/looking-up C constructs. 
/// /// Must be followed by a call to \see ActOnObjCReenterContainerContext void ActOnObjCTemporaryExitContainerContext(DeclContext *DC); void ActOnObjCReenterContainerContext(DeclContext *DC); /// ActOnTagDefinitionError - Invoked when there was an unrecoverable /// error parsing the definition of a tag. void ActOnTagDefinitionError(Scope *S, Decl *TagDecl); EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum, EnumConstantDecl *LastEnumConst, SourceLocation IdLoc, IdentifierInfo *Id, Expr *val); bool CheckEnumUnderlyingType(TypeSourceInfo *TI); bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped, QualType EnumUnderlyingTy, bool IsFixed, const EnumDecl *Prev); /// Determine whether the body of an anonymous enumeration should be skipped. /// \param II The name of the first enumerator. SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II, SourceLocation IILoc); Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant, SourceLocation IdLoc, IdentifierInfo *Id, const ParsedAttributesView &Attrs, SourceLocation EqualLoc, Expr *Val); void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange, Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S, const ParsedAttributesView &Attr); /// Set the current declaration context until it gets popped. void PushDeclContext(Scope *S, DeclContext *DC); void PopDeclContext(); /// EnterDeclaratorContext - Used when we must lookup names in the context /// of a declarator's nested name specifier. void EnterDeclaratorContext(Scope *S, DeclContext *DC); void ExitDeclaratorContext(Scope *S); /// Enter a template parameter scope, after it's been associated with a particular /// DeclContext. Causes lookup within the scope to chain through enclosing contexts /// in the correct order. void EnterTemplatedContext(Scope *S, DeclContext *DC); /// Push the parameters of D, which must be a function, into scope. void ActOnReenterFunctionContext(Scope* S, Decl* D); void ActOnExitFunctionContext(); DeclContext *getFunctionLevelDeclContext(); /// getCurFunctionDecl - If inside of a function body, this returns a pointer /// to the function decl for the function being parsed. If we're currently /// in a 'block', this returns the containing context. FunctionDecl *getCurFunctionDecl(); /// getCurMethodDecl - If inside of a method body, this returns a pointer to /// the method decl for the method being parsed. If we're currently /// in a 'block', this returns the containing context. ObjCMethodDecl *getCurMethodDecl(); /// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method /// or C function we're in, otherwise return null. If we're currently /// in a 'block', this returns the containing context. NamedDecl *getCurFunctionOrMethodDecl(); /// Add this decl to the scope shadowed decl chains. void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true); /// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true /// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns /// true if 'D' belongs to the given declaration context. /// /// \param AllowInlineNamespace If \c true, allow the declaration to be in the /// enclosing namespace set of the context, rather than contained /// directly within it. bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr, bool AllowInlineNamespace = false); /// Finds the scope corresponding to the given decl context, if it /// happens to be an enclosing scope. Otherwise return NULL. 
static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC);

/// Subroutines of ActOnDeclarator().
TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
                              TypeSourceInfo *TInfo);
bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New);

/// Describes the kind of merge to perform for availability
/// attributes (including "deprecated", "unavailable", and "availability").
enum AvailabilityMergeKind {
  /// Don't merge availability attributes at all.
  AMK_None,

  /// Merge availability attributes for a redeclaration, which requires
  /// an exact match.
  AMK_Redeclaration,

  /// Merge availability attributes for an override, which requires
  /// an exact match or a weakening of constraints.
  AMK_Override,

  /// Merge availability attributes for an implementation of
  /// a protocol requirement.
  AMK_ProtocolImplementation,

  /// Merge availability attributes for an implementation of
  /// an optional protocol requirement.
  AMK_OptionalProtocolImplementation
};

/// Describes the kind of priority given to an availability attribute.
///
/// The sum of priorities determines the final priority of the attribute.
/// The final priority determines how the attribute will be merged.
/// An attribute with a lower priority will always remove higher priority
/// attributes for the specified platform when it is being applied. An
/// attribute with a higher priority will not be applied if the declaration
/// already has an availability attribute with a lower priority for the
/// specified platform. The final priority values are not expected to match
/// the values in this enumeration, but instead should be treated as a plain
/// integer value. This enumeration just names the priority weights that are
/// used to calculate that final value.
enum AvailabilityPriority : int {
  /// The availability attribute was specified explicitly next to the
  /// declaration.
  AP_Explicit = 0,

  /// The availability attribute was applied using '#pragma clang attribute'.
  AP_PragmaClangAttribute = 1,

  /// The availability attribute for a specific platform was inferred from
  /// an availability attribute for another platform.
  AP_InferredFromOtherPlatform = 2
};

/// Attribute merging methods. Return true if a new attribute was added.
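///
/// For example (an illustrative sketch), availability merged onto a
/// redeclaration (AMK_Redeclaration requires an exact match):
/// \code
///   void f() __attribute__((availability(macos, introduced=10.12)));
///   void f(); // redeclaration; the availability attribute is merged onto it
/// \endcode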
AvailabilityAttr * mergeAvailabilityAttr(NamedDecl *D, const AttributeCommonInfo &CI, IdentifierInfo *Platform, bool Implicit, VersionTuple Introduced, VersionTuple Deprecated, VersionTuple Obsoleted, bool IsUnavailable, StringRef Message, bool IsStrict, StringRef Replacement, AvailabilityMergeKind AMK, int Priority); TypeVisibilityAttr * mergeTypeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI, TypeVisibilityAttr::VisibilityType Vis); VisibilityAttr *mergeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI, VisibilityAttr::VisibilityType Vis); UuidAttr *mergeUuidAttr(Decl *D, const AttributeCommonInfo &CI, StringRef UuidAsWritten, MSGuidDecl *GuidDecl); DLLImportAttr *mergeDLLImportAttr(Decl *D, const AttributeCommonInfo &CI); DLLExportAttr *mergeDLLExportAttr(Decl *D, const AttributeCommonInfo &CI); MSInheritanceAttr *mergeMSInheritanceAttr(Decl *D, const AttributeCommonInfo &CI, bool BestCase, MSInheritanceModel Model); ErrorAttr *mergeErrorAttr(Decl *D, const AttributeCommonInfo &CI, StringRef NewUserDiagnostic); FormatAttr *mergeFormatAttr(Decl *D, const AttributeCommonInfo &CI, IdentifierInfo *Format, int FormatIdx, int FirstArg); SectionAttr *mergeSectionAttr(Decl *D, const AttributeCommonInfo &CI, StringRef Name); CodeSegAttr *mergeCodeSegAttr(Decl *D, const AttributeCommonInfo &CI, StringRef Name); AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D, const AttributeCommonInfo &CI, const IdentifierInfo *Ident); MinSizeAttr *mergeMinSizeAttr(Decl *D, const AttributeCommonInfo &CI); SwiftNameAttr *mergeSwiftNameAttr(Decl *D, const SwiftNameAttr &SNA, StringRef Name); OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D, const AttributeCommonInfo &CI); InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const ParsedAttr &AL); InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const InternalLinkageAttr &AL); WebAssemblyImportNameAttr *mergeImportNameAttr( Decl *D, const WebAssemblyImportNameAttr &AL); WebAssemblyImportModuleAttr *mergeImportModuleAttr( Decl *D, const WebAssemblyImportModuleAttr &AL); EnforceTCBAttr *mergeEnforceTCBAttr(Decl *D, const EnforceTCBAttr &AL); EnforceTCBLeafAttr *mergeEnforceTCBLeafAttr(Decl *D, const EnforceTCBLeafAttr &AL); BTFDeclTagAttr *mergeBTFDeclTagAttr(Decl *D, const BTFDeclTagAttr &AL); void mergeDeclAttributes(NamedDecl *New, Decl *Old, AvailabilityMergeKind AMK = AMK_Redeclaration); void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New, LookupResult &OldDecls); bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S, bool MergeTypeWithOld); bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old, Scope *S, bool MergeTypeWithOld); void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old); void MergeVarDecl(VarDecl *New, LookupResult &Previous); void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld); void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old); bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn); void notePreviousDefinition(const NamedDecl *Old, SourceLocation New); bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S); // AssignmentAction - This is used by all the assignment diagnostic functions // to represent what is actually causing the operation enum AssignmentAction { AA_Assigning, AA_Passing, AA_Returning, AA_Converting, AA_Initializing, AA_Sending, AA_Casting, AA_Passing_CFAudited }; /// C++ Overloading. 
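///
/// A quick sketch of the outcomes CheckOverload distinguishes (illustrative):
/// \code
///   void f(int);
///   void f(double);  // Ovl_Overload: different signature, a valid overload
///   void f(int);     // Ovl_Match: exactly matches an existing declaration
///   // If 'f' had previously been declared as 'int f;', a later
///   // 'void f(int);' would be Ovl_NonFunction.
/// \endcode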
enum OverloadKind { /// This is a legitimate overload: the existing declarations are /// functions or function templates with different signatures. Ovl_Overload, /// This is not an overload because the signature exactly matches /// an existing declaration. Ovl_Match, /// This is not an overload because the lookup results contain a /// non-function. Ovl_NonFunction }; OverloadKind CheckOverload(Scope *S, FunctionDecl *New, const LookupResult &OldDecls, NamedDecl *&OldDecl, bool IsForUsingDecl); bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl, bool ConsiderCudaAttrs = true, bool ConsiderRequiresClauses = true); enum class AllowedExplicit { /// Allow no explicit functions to be used. None, /// Allow explicit conversion functions but not explicit constructors. Conversions, /// Allow both explicit conversion functions and explicit constructors. All }; ImplicitConversionSequence TryImplicitConversion(Expr *From, QualType ToType, bool SuppressUserConversions, AllowedExplicit AllowExplicit, bool InOverloadResolution, bool CStyle, bool AllowObjCWritebackConversion); bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType); bool IsFloatingPointPromotion(QualType FromType, QualType ToType); bool IsComplexPromotion(QualType FromType, QualType ToType); bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCWritebackConversion(QualType FromType, QualType ToType, QualType &ConvertedType); bool IsBlockPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType); bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType, const FunctionProtoType *NewType, unsigned *ArgPos = nullptr); void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag, QualType FromType, QualType ToType); void maybeExtendBlockObject(ExprResult &E); CastKind PrepareCastToObjCObjectPointer(ExprResult &E); bool CheckPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath& BasePath, bool IgnoreBaseAccess, bool Diagnose = true); bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType &ConvertedType); bool CheckMemberPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath &BasePath, bool IgnoreBaseAccess); bool IsQualificationConversion(QualType FromType, QualType ToType, bool CStyle, bool &ObjCLifetimeConversion); bool IsFunctionConversion(QualType FromType, QualType ToType, QualType &ResultTy); bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType); bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg); bool CanPerformAggregateInitializationForOverloadResolution( const InitializedEntity &Entity, InitListExpr *From); bool IsStringInit(Expr *Init, const ArrayType *AT); bool CanPerformCopyInitialization(const InitializedEntity &Entity, ExprResult Init); ExprResult PerformCopyInitialization(const InitializedEntity &Entity, SourceLocation EqualLoc, ExprResult Init, bool TopLevelOfInitList = false, bool AllowExplicit = false); ExprResult PerformObjectArgumentInitialization(Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl, CXXMethodDecl *Method); /// Check that the lifetime of the initializer (and its subobjects) is /// sufficient for initializing the entity, and perform lifetime extension /// (when permitted) if not. 
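///
/// The classic cases this guards (illustrative):
/// \code
///   const int &R = 42;              // OK: temporary's lifetime extended to R's
///   const int &Bad() { return 42; } // diagnosed: reference to a temporary
///                                   // that dies at the end of the return
/// \endcode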
void checkInitializerLifetime(const InitializedEntity &Entity, Expr *Init); ExprResult PerformContextuallyConvertToBool(Expr *From); ExprResult PerformContextuallyConvertToObjCPointer(Expr *From); /// Contexts in which a converted constant expression is required. enum CCEKind { CCEK_CaseValue, ///< Expression in a case label. CCEK_Enumerator, ///< Enumerator value with fixed underlying type. CCEK_TemplateArg, ///< Value of a non-type template parameter. CCEK_ArrayBound, ///< Array bound in array declarator or new-expression. CCEK_ExplicitBool, ///< Condition in an explicit(bool) specifier. CCEK_Noexcept ///< Condition in a noexcept(bool) specifier. }; ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, llvm::APSInt &Value, CCEKind CCE); ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, APValue &Value, CCEKind CCE, NamedDecl *Dest = nullptr); /// Abstract base class used to perform a contextual implicit /// conversion from an expression to any type passing a filter. class ContextualImplicitConverter { public: bool Suppress; bool SuppressConversion; ContextualImplicitConverter(bool Suppress = false, bool SuppressConversion = false) : Suppress(Suppress), SuppressConversion(SuppressConversion) {} /// Determine whether the specified type is a valid destination type /// for this conversion. virtual bool match(QualType T) = 0; /// Emits a diagnostic complaining that the expression does not have /// integral or enumeration type. virtual SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a diagnostic when the expression has incomplete class type. virtual SemaDiagnosticBuilder diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a diagnostic when the only matching conversion function /// is explicit. virtual SemaDiagnosticBuilder diagnoseExplicitConv( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0; /// Emits a note for the explicit conversion function. virtual SemaDiagnosticBuilder noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0; /// Emits a diagnostic when there are multiple possible conversion /// functions. virtual SemaDiagnosticBuilder diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a note for one of the candidate conversions. virtual SemaDiagnosticBuilder noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0; /// Emits a diagnostic when we picked a conversion function /// (for cases when we are not allowed to pick a conversion function). virtual SemaDiagnosticBuilder diagnoseConversion( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0; virtual ~ContextualImplicitConverter() {} }; class ICEConvertDiagnoser : public ContextualImplicitConverter { bool AllowScopedEnumerations; public: ICEConvertDiagnoser(bool AllowScopedEnumerations, bool Suppress, bool SuppressConversion) : ContextualImplicitConverter(Suppress, SuppressConversion), AllowScopedEnumerations(AllowScopedEnumerations) {} /// Match an integral or (possibly scoped) enumeration type. bool match(QualType T) override; SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override { return diagnoseNotInt(S, Loc, T); } /// Emits a diagnostic complaining that the expression does not have /// integral or enumeration type. virtual SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0; }; /// Perform a contextual implicit conversion. 
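///
/// e.g. (illustrative) the condition of a switch is such a context:
/// \code
///   struct Flag { operator int() const; };
///   void g(Flag F) {
///     switch (F) { // contextual implicit conversion via Flag::operator int
///     default: break;
///     }
///   }
/// \endcode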
ExprResult PerformContextualImplicitConversion(
    SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter);

enum ObjCSubscriptKind {
  OS_Array,
  OS_Dictionary,
  OS_Error
};
ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE);

// Note that LK_String is intentionally after the other literals, as
// this is used for diagnostics logic.
enum ObjCLiteralKind {
  LK_Array,
  LK_Dictionary,
  LK_Numeric,
  LK_Boxed,
  LK_String,
  LK_Block,
  LK_None
};
ObjCLiteralKind CheckLiteralKind(Expr *FromE);

ExprResult PerformObjectMemberConversion(Expr *From,
                                         NestedNameSpecifier *Qualifier,
                                         NamedDecl *FoundDecl,
                                         NamedDecl *Member);

// Members have to be NamespaceDecl* or TranslationUnitDecl*.
// TODO: make this a typesafe union.
typedef llvm::SmallSetVector<DeclContext *, 16> AssociatedNamespaceSet;
typedef llvm::SmallSetVector<CXXRecordDecl *, 16> AssociatedClassSet;

using ADLCallKind = CallExpr::ADLCallKind;

void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl,
                          ArrayRef<Expr *> Args,
                          OverloadCandidateSet &CandidateSet,
                          bool SuppressUserConversions = false,
                          bool PartialOverloading = false,
                          bool AllowExplicit = true,
                          bool AllowExplicitConversion = false,
                          ADLCallKind IsADLCandidate = ADLCallKind::NotADL,
                          ConversionSequenceList EarlyConversions = None,
                          OverloadCandidateParamOrder PO = {});
void AddFunctionCandidates(const UnresolvedSetImpl &Functions,
                           ArrayRef<Expr *> Args,
                           OverloadCandidateSet &CandidateSet,
                           TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
                           bool SuppressUserConversions = false,
                           bool PartialOverloading = false,
                           bool FirstArgumentIsBase = false);
void AddMethodCandidate(DeclAccessPair FoundDecl, QualType ObjectType,
                        Expr::Classification ObjectClassification,
                        ArrayRef<Expr *> Args,
                        OverloadCandidateSet &CandidateSet,
                        bool SuppressUserConversion = false,
                        OverloadCandidateParamOrder PO = {});
void AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl,
                        CXXRecordDecl *ActingContext, QualType ObjectType,
                        Expr::Classification ObjectClassification,
                        ArrayRef<Expr *> Args,
                        OverloadCandidateSet &CandidateSet,
                        bool SuppressUserConversions = false,
                        bool PartialOverloading = false,
                        ConversionSequenceList EarlyConversions = None,
                        OverloadCandidateParamOrder PO = {});
void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl,
                                DeclAccessPair FoundDecl,
                                CXXRecordDecl *ActingContext,
                                TemplateArgumentListInfo *ExplicitTemplateArgs,
                                QualType ObjectType,
                                Expr::Classification ObjectClassification,
                                ArrayRef<Expr *> Args,
                                OverloadCandidateSet &CandidateSet,
                                bool SuppressUserConversions = false,
                                bool PartialOverloading = false,
                                OverloadCandidateParamOrder PO = {});
void AddTemplateOverloadCandidate(
    FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
    TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
    OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false,
    bool PartialOverloading = false, bool AllowExplicit = true,
    ADLCallKind IsADLCandidate = ADLCallKind::NotADL,
    OverloadCandidateParamOrder PO = {});
bool CheckNonDependentConversions(
    FunctionTemplateDecl *FunctionTemplate, ArrayRef<QualType> ParamTypes,
    ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet,
    ConversionSequenceList &Conversions, bool SuppressUserConversions,
    CXXRecordDecl *ActingContext = nullptr, QualType ObjectType = QualType(),
    Expr::Classification ObjectClassification = {},
    OverloadCandidateParamOrder PO = {});
void AddConversionCandidate(
    CXXConversionDecl *Conversion, DeclAccessPair FoundDecl,
    CXXRecordDecl *ActingContext, Expr *From,
QualType ToType, OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit, bool AllowExplicit, bool AllowResultConversion = true); void AddTemplateConversionCandidate( FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit, bool AllowExplicit, bool AllowResultConversion = true); void AddSurrogateCandidate(CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, const FunctionProtoType *Proto, Expr *Object, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddNonMemberOperatorCandidates( const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr); void AddMemberOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, OverloadCandidateParamOrder PO = {}); void AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool IsAssignmentOperator = false, unsigned NumContextualBoolArguments = 0); void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddArgumentDependentLookupCandidates(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, TemplateArgumentListInfo *ExplicitTemplateArgs, OverloadCandidateSet& CandidateSet, bool PartialOverloading = false); // Emit as a 'note' the specific overload candidate void NoteOverloadCandidate( NamedDecl *Found, FunctionDecl *Fn, OverloadCandidateRewriteKind RewriteKind = OverloadCandidateRewriteKind(), QualType DestType = QualType(), bool TakingAddress = false); // Emit as a series of 'note's all template and non-templates identified by // the expression Expr void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(), bool TakingAddress = false); /// Check the enable_if expressions on the given function. Returns the first /// failing attribute, or NULL if they were all successful. EnableIfAttr *CheckEnableIf(FunctionDecl *Function, SourceLocation CallLoc, ArrayRef<Expr *> Args, bool MissingImplicitThis = false); /// Find the failed Boolean condition within a given Boolean /// constant expression, and describe it with a string. std::pair<Expr *, std::string> findFailedBooleanCondition(Expr *Cond); /// Emit diagnostics for the diagnose_if attributes on Function, ignoring any /// non-ArgDependent DiagnoseIfAttrs. /// /// Argument-dependent diagnose_if attributes should be checked each time a /// function is used as a direct callee of a function call. /// /// Returns true if any errors were emitted. bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function, const Expr *ThisArg, ArrayRef<const Expr *> Args, SourceLocation Loc); /// Emit diagnostics for the diagnose_if attributes on Function, ignoring any /// ArgDependent DiagnoseIfAttrs. /// /// Argument-independent diagnose_if attributes should be checked on every use /// of a function. /// /// Returns true if any errors were emitted. bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND, SourceLocation Loc); /// Returns whether the given function's address can be taken or not, /// optionally emitting a diagnostic if the address can't be taken. /// /// Returns false if taking the address of the function is illegal. 
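///
/// For instance (illustrative), functions with non-tautological enable_if
/// conditions cannot have their address taken:
/// \code
///   void poll(int N) __attribute__((enable_if(N > 0, "N must be positive")));
///   auto *P = &poll; // diagnosed: cannot take the address of an
///                    // enable_if function
/// \endcode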
bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function,
                                       bool Complain = false,
                                       SourceLocation Loc = SourceLocation());

// [PossiblyAFunctionType]  -->   [Return]
// NonFunctionType --> NonFunctionType
// R (A) --> R(A)
// R (*)(A) --> R (A)
// R (&)(A) --> R (A)
// R (S::*)(A) --> R (A)
QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType);

FunctionDecl *
ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr, QualType TargetType,
                                   bool Complain, DeclAccessPair &Found,
                                   bool *pHadMultipleCandidates = nullptr);

FunctionDecl *
resolveAddressOfSingleOverloadCandidate(Expr *E, DeclAccessPair &FoundResult);

bool resolveAndFixAddressOfSingleOverloadCandidate(
    ExprResult &SrcExpr, bool DoFunctionPointerConversion = false);

FunctionDecl *
ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
                                            bool Complain = false,
                                            DeclAccessPair *Found = nullptr);

bool ResolveAndFixSingleFunctionTemplateSpecialization(
    ExprResult &SrcExpr, bool DoFunctionPointerConversion = false,
    bool Complain = false, SourceRange OpRangeForComplaining = SourceRange(),
    QualType DestTypeForComplaining = QualType(),
    unsigned DiagIDForComplaining = 0);

Expr *FixOverloadedFunctionReference(Expr *E, DeclAccessPair FoundDecl,
                                     FunctionDecl *Fn);
ExprResult FixOverloadedFunctionReference(ExprResult,
                                          DeclAccessPair FoundDecl,
                                          FunctionDecl *Fn);

void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE,
                                 ArrayRef<Expr *> Args,
                                 OverloadCandidateSet &CandidateSet,
                                 bool PartialOverloading = false);
void AddOverloadedCallCandidates(
    LookupResult &R, TemplateArgumentListInfo *ExplicitTemplateArgs,
    ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet);

// An enum used to represent the different possible results of building a
// range-based for loop.
enum ForRangeStatus {
  FRS_Success,
  FRS_NoViableFunction,
  FRS_DiagnosticIssued
};

ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc,
                                         SourceLocation RangeLoc,
                                         const DeclarationNameInfo &NameInfo,
                                         LookupResult &MemberLookup,
                                         OverloadCandidateSet *CandidateSet,
                                         Expr *Range, ExprResult *CallExpr);

ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn,
                                   UnresolvedLookupExpr *ULE,
                                   SourceLocation LParenLoc,
                                   MultiExprArg Args,
                                   SourceLocation RParenLoc,
                                   Expr *ExecConfig,
                                   bool AllowTypoCorrection = true,
                                   bool CalleesAddressIsTaken = false);

bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE,
                            MultiExprArg Args, SourceLocation RParenLoc,
                            OverloadCandidateSet *CandidateSet,
                            ExprResult *Result);

ExprResult CreateUnresolvedLookupExpr(CXXRecordDecl *NamingClass,
                                      NestedNameSpecifierLoc NNSLoc,
                                      DeclarationNameInfo DNI,
                                      const UnresolvedSetImpl &Fns,
                                      bool PerformADL = true);

ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
                                   const UnresolvedSetImpl &Fns,
                                   Expr *input, bool RequiresADL = true);

void LookupOverloadedBinOp(OverloadCandidateSet &CandidateSet,
                           OverloadedOperatorKind Op,
                           const UnresolvedSetImpl &Fns,
                           ArrayRef<Expr *> Args, bool RequiresADL = true);
ExprResult CreateOverloadedBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc,
                                 const UnresolvedSetImpl &Fns, Expr *LHS,
                                 Expr *RHS, bool RequiresADL = true,
                                 bool AllowRewrittenCandidates = true,
                                 FunctionDecl *DefaultedFn = nullptr);
ExprResult BuildSynthesizedThreeWayComparison(SourceLocation OpLoc,
                                              const UnresolvedSetImpl &Fns,
                                              Expr *LHS, Expr *RHS,
                                              FunctionDecl *DefaultedFn);

ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
                                              SourceLocation RLoc, Expr *Base,
                                              Expr *Idx);
ExprResult BuildCallToMemberFunction(Scope *S, Expr *MemExpr,
                                     SourceLocation LParenLoc,
MultiExprArg Args, SourceLocation RParenLoc, Expr *ExecConfig = nullptr, bool IsExecConfig = false, bool AllowRecovery = false); ExprResult BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc); ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base, SourceLocation OpLoc, bool *NoArrowOperatorFound = nullptr); /// CheckCallReturnType - Checks that a call expression's return type is /// complete. Returns true on failure. The location passed in is the location /// that best represents the call. bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc, CallExpr *CE, FunctionDecl *FD); /// Helpers for dealing with blocks and functions. bool CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters, bool CheckParameterNames); void CheckCXXDefaultArguments(FunctionDecl *FD); void CheckExtraCXXDefaultArguments(Declarator &D); Scope *getNonFieldDeclScope(Scope *S); /// \name Name lookup /// /// These routines provide name lookup that is used during semantic /// analysis to resolve the various kinds of names (identifiers, /// overloaded operator names, constructor names, etc.) into zero or /// more declarations within a particular scope. The major entry /// points are LookupName, which performs unqualified name lookup, /// and LookupQualifiedName, which performs qualified name lookup. /// /// All name lookup is performed based on some specific criteria, /// which specify what names will be visible to name lookup and how /// far name lookup should work. These criteria are important both /// for capturing language semantics (certain lookups will ignore /// certain names, for example) and for performance, since name /// lookup is often a bottleneck in the compilation of C++. Name /// lookup criteria is specified via the LookupCriteria enumeration. /// /// The results of name lookup can vary based on the kind of name /// lookup performed, the current language, and the translation /// unit. In C, for example, name lookup will either return nothing /// (no entity found) or a single declaration. In C++, name lookup /// can additionally refer to a set of overloaded functions or /// result in an ambiguity. All of the possible results of name /// lookup are captured by the LookupResult class, which provides /// the ability to distinguish among them. //@{ /// Describes the kind of name lookup to perform. enum LookupNameKind { /// Ordinary name lookup, which finds ordinary names (functions, /// variables, typedefs, etc.) in C and most kinds of names /// (functions, variables, members, types, etc.) in C++. LookupOrdinaryName = 0, /// Tag name lookup, which finds the names of enums, classes, /// structs, and unions. LookupTagName, /// Label name lookup. LookupLabel, /// Member name lookup, which finds the names of /// class/struct/union members. LookupMemberName, /// Look up of an operator name (e.g., operator+) for use with /// operator overloading. This lookup is similar to ordinary name /// lookup, but will ignore any declarations that are class members. LookupOperatorName, /// Look up a name following ~ in a destructor name. This is an ordinary /// lookup, but prefers tags to typedefs. LookupDestructorName, /// Look up of a name that precedes the '::' scope resolution /// operator in C++. This lookup completely ignores operator, object, /// function, and enumerator names (C++ [basic.lookup.qual]p1). 
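///
/// e.g. (illustrative) the struct is found even though a variable
/// shadows its name for ordinary lookup:
/// \code
///   struct Data { static int Version; };
///   int Data = 0;
///   int V = Data::Version; // nested-name lookup ignores the variable
/// \endcode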
LookupNestedNameSpecifierName, /// Look up a namespace name within a C++ using directive or /// namespace alias definition, ignoring non-namespace names (C++ /// [basic.lookup.udir]p1). LookupNamespaceName, /// Look up all declarations in a scope with the given name, /// including resolved using declarations. This is appropriate /// for checking redeclarations for a using declaration. LookupUsingDeclName, /// Look up an ordinary name that is going to be redeclared as a /// name with linkage. This lookup ignores any declarations that /// are outside of the current scope unless they have linkage. See /// C99 6.2.2p4-5 and C++ [basic.link]p6. LookupRedeclarationWithLinkage, /// Look up a friend of a local class. This lookup does not look /// outside the innermost non-class scope. See C++11 [class.friend]p11. LookupLocalFriendName, /// Look up the name of an Objective-C protocol. LookupObjCProtocolName, /// Look up implicit 'self' parameter of an objective-c method. LookupObjCImplicitSelfParam, /// Look up the name of an OpenMP user-defined reduction operation. LookupOMPReductionName, /// Look up the name of an OpenMP user-defined mapper. LookupOMPMapperName, /// Look up any declaration with any name. LookupAnyName }; /// Specifies whether (or how) name lookup is being performed for a /// redeclaration (vs. a reference). enum RedeclarationKind { /// The lookup is a reference to this name that is not for the /// purpose of redeclaring the name. NotForRedeclaration = 0, /// The lookup results will be used for redeclaration of a name, /// if an entity by that name already exists and is visible. ForVisibleRedeclaration, /// The lookup results will be used for redeclaration of a name /// with external linkage; non-visible lookup results with external linkage /// may also be found. ForExternalRedeclaration }; RedeclarationKind forRedeclarationInCurContext() { // A declaration with an owning module for linkage can never link against // anything that is not visible. We don't need to check linkage here; if // the context has internal linkage, redeclaration lookup won't find things // from other TUs, and we can't safely compute linkage yet in general. if (cast<Decl>(CurContext) ->getOwningModuleForLinkage(/*IgnoreLinkage*/true)) return ForVisibleRedeclaration; return ForExternalRedeclaration; } /// The possible outcomes of name lookup for a literal operator. enum LiteralOperatorLookupResult { /// The lookup resulted in an error. LOLR_Error, /// The lookup found no match but no diagnostic was issued. LOLR_ErrorNoDiagnostic, /// The lookup found a single 'cooked' literal operator, which /// expects a normal literal to be built and passed to it. LOLR_Cooked, /// The lookup found a single 'raw' literal operator, which expects /// a string literal containing the spelling of the literal token. LOLR_Raw, /// The lookup found an overload set of literal operator templates, /// which expect the characters of the spelling of the literal token to be /// passed as a non-type template argument pack. LOLR_Template, /// The lookup found an overload set of literal operator templates, /// which expect the character type and characters of the spelling of the /// string literal token to be passed as template arguments. 
  LOLR_StringTemplatePack,
};

SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D,
                                                CXXSpecialMember SM,
                                                bool ConstArg,
                                                bool VolatileArg,
                                                bool RValueThis,
                                                bool ConstThis,
                                                bool VolatileThis);

typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator;
typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)>
    TypoRecoveryCallback;

private:
bool CppLookupName(LookupResult &R, Scope *S);

struct TypoExprState {
  std::unique_ptr<TypoCorrectionConsumer> Consumer;
  TypoDiagnosticGenerator DiagHandler;
  TypoRecoveryCallback RecoveryHandler;
  TypoExprState();
  TypoExprState(TypoExprState &&other) noexcept;
  TypoExprState &operator=(TypoExprState &&other) noexcept;
};

/// The set of unhandled TypoExprs and their associated state.
llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos;

/// Creates a new TypoExpr AST node.
TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC,
                            TypoDiagnosticGenerator TDG,
                            TypoRecoveryCallback TRC, SourceLocation TypoLoc);

// The set of known/encountered (unique, canonicalized) NamespaceDecls.
//
// The boolean value will be true to indicate that the namespace was loaded
// from an AST/PCH file, or false otherwise.
llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces;

/// Whether we have already loaded known namespaces from an external
/// source.
bool LoadedExternalKnownNamespaces;

/// Helper for CorrectTypo and CorrectTypoDelayed used to create and
/// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction
/// should be skipped entirely.
std::unique_ptr<TypoCorrectionConsumer>
makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo,
                           Sema::LookupNameKind LookupKind, Scope *S,
                           CXXScopeSpec *SS,
                           CorrectionCandidateCallback &CCC,
                           DeclContext *MemberContext, bool EnteringContext,
                           const ObjCObjectPointerType *OPT,
                           bool ErrorRecovery);

public:
const TypoExprState &getTypoExprState(TypoExpr *TE) const;

/// Clears the state of the given TypoExpr.
void clearDelayedTypo(TypoExpr *TE);

/// Look up a name, looking for a single declaration. Return
/// null if the results were absent, ambiguous, or overloaded.
///
/// It is preferable to use the elaborated form and explicitly handle
/// ambiguity and overloading.
NamedDecl *LookupSingleName(Scope *S, DeclarationName Name, SourceLocation Loc, LookupNameKind NameKind, RedeclarationKind Redecl = NotForRedeclaration); bool LookupBuiltin(LookupResult &R); void LookupNecessaryTypesForBuiltin(Scope *S, unsigned ID); bool LookupName(LookupResult &R, Scope *S, bool AllowBuiltinCreation = false); bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, bool InUnqualifiedLookup = false); bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, CXXScopeSpec &SS); bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS, bool AllowBuiltinCreation = false, bool EnteringContext = false); ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc, RedeclarationKind Redecl = NotForRedeclaration); bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class); void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S, UnresolvedSetImpl &Functions); LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc, SourceLocation GnuLabelLoc = SourceLocation()); DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class); CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class); CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class, unsigned Quals); CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals, bool RValueThis, unsigned ThisQuals); CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class, unsigned Quals); CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals, bool RValueThis, unsigned ThisQuals); CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class); bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id, bool IsUDSuffix); LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R, ArrayRef<QualType> ArgTys, bool AllowRaw, bool AllowTemplate, bool AllowStringTemplate, bool DiagnoseMissing, StringLiteral *StringLit = nullptr); bool isKnownName(StringRef name); /// Status of the function emission on the CUDA/HIP/OpenMP host/device attrs. enum class FunctionEmissionStatus { Emitted, CUDADiscarded, // Discarded due to CUDA/HIP hostness OMPDiscarded, // Discarded due to OpenMP hostness TemplateDiscarded, // Discarded due to uninstantiated templates Unknown, }; FunctionEmissionStatus getEmissionStatus(FunctionDecl *Decl, bool Final = false); // Whether the callee should be ignored in CUDA/HIP/OpenMP host/device check. bool shouldIgnoreInHostDeviceCheck(FunctionDecl *Callee); void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, ADLResult &Functions); void LookupVisibleDecls(Scope *S, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true, bool LoadExternal = true); void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true, bool IncludeDependentBases = false, bool LoadExternal = true); enum CorrectTypoKind { CTK_NonError, // CorrectTypo used in a non error recovery situation. CTK_ErrorRecovery // CorrectTypo used in normal error recovery. 
}; TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr, bool RecordFailure = true); TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr); /// Process any TypoExprs in the given Expr and its children, /// generating diagnostics as appropriate and returning a new Expr if there /// were typos that were all successfully corrected and ExprError if one or /// more typos could not be corrected. /// /// \param E The Expr to check for TypoExprs. /// /// \param InitDecl A VarDecl to avoid because the Expr being corrected is its /// initializer. /// /// \param RecoverUncorrectedTypos If true, when typo correction fails, it /// will rebuild the given Expr with all TypoExprs degraded to RecoveryExprs. /// /// \param Filter A function applied to a newly rebuilt Expr to determine if /// it is an acceptable/usable result from a single combination of typo /// corrections. As long as the filter returns ExprError, different /// combinations of corrections will be tried until all are exhausted. ExprResult CorrectDelayedTyposInExpr( Expr *E, VarDecl *InitDecl = nullptr, bool RecoverUncorrectedTypos = false, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }); ExprResult CorrectDelayedTyposInExpr( ExprResult ER, VarDecl *InitDecl = nullptr, bool RecoverUncorrectedTypos = false, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }) { return ER.isInvalid() ? ER : CorrectDelayedTyposInExpr(ER.get(), InitDecl, RecoverUncorrectedTypos, Filter); } void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, bool ErrorRecovery = true); void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, const PartialDiagnostic &PrevNote, bool ErrorRecovery = true); void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F); void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc, ArrayRef<Expr *> Args, AssociatedNamespaceSet &AssociatedNamespaces, AssociatedClassSet &AssociatedClasses); void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S, bool ConsiderLinkage, bool AllowInlineNamespace); bool CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old); void DiagnoseAmbiguousLookup(LookupResult &Result); //@} /// Attempts to produce a RecoveryExpr after some AST node cannot be created. 
ExprResult CreateRecoveryExpr(SourceLocation Begin, SourceLocation End, ArrayRef<Expr *> SubExprs, QualType T = QualType()); ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id, SourceLocation IdLoc, bool TypoCorrection = false); FunctionDecl *CreateBuiltin(IdentifierInfo *II, QualType Type, unsigned ID, SourceLocation Loc); NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID, Scope *S, bool ForRedeclaration, SourceLocation Loc); NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II, Scope *S); void AddKnownFunctionAttributesForReplaceableGlobalAllocationFunction( FunctionDecl *FD); void AddKnownFunctionAttributes(FunctionDecl *FD); // More parsing and symbol table subroutines. void ProcessPragmaWeak(Scope *S, Decl *D); // Decl attributes - this routine is the top level dispatcher. void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD); // Helper for delayed processing of attributes. void ProcessDeclAttributeDelayed(Decl *D, const ParsedAttributesView &AttrList); void ProcessDeclAttributeList(Scope *S, Decl *D, const ParsedAttributesView &AL, bool IncludeCXX11Attributes = true); bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl, const ParsedAttributesView &AttrList); void checkUnusedDeclAttributes(Declarator &D); /// Handles semantic checking for features that are common to all attributes, /// such as checking whether a parameter was properly specified, or the /// correct number of arguments were passed, etc. Returns true if the /// attribute has been diagnosed. bool checkCommonAttributeFeatures(const Decl *D, const ParsedAttr &A); bool checkCommonAttributeFeatures(const Stmt *S, const ParsedAttr &A); /// Map any API notes provided for this declaration to attributes on the /// declaration. /// /// Triggered by declaration-attribute processing. void ProcessAPINotes(Decl *D); /// Determine if type T is a valid subject for a nonnull and similar /// attributes. By default, we look through references (the behavior used by /// nonnull), but if the second parameter is true, then we treat a reference /// type as valid. bool isValidPointerAttrType(QualType T, bool RefOkay = false); bool CheckRegparmAttr(const ParsedAttr &attr, unsigned &value); bool CheckCallingConvAttr(const ParsedAttr &attr, CallingConv &CC, const FunctionDecl *FD = nullptr); bool CheckAttrTarget(const ParsedAttr &CurrAttr); bool CheckAttrNoArgs(const ParsedAttr &CurrAttr); bool checkStringLiteralArgumentAttr(const ParsedAttr &Attr, unsigned ArgNum, StringRef &Str, SourceLocation *ArgLocation = nullptr); llvm::Error isValidSectionSpecifier(StringRef Str); bool checkSectionName(SourceLocation LiteralLoc, StringRef Str); bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str); bool checkMSInheritanceAttrOnDefinition( CXXRecordDecl *RD, SourceRange Range, bool BestCase, MSInheritanceModel SemanticSpelling); void CheckAlignasUnderalignment(Decl *D); /// Adjust the calling convention of a method to be the ABI default if it /// wasn't specified explicitly. This handles method types formed from /// function type typedefs and typename template arguments. void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor, SourceLocation Loc); // Check if there is an explicit attribute, but only look through parens. // The intent is to look for an attribute on the current declarator, but not // one that came from a typedef. bool hasExplicitCallingConv(QualType T); /// Get the outermost AttributedType node that sets a calling convention. 
  /// Get the outermost AttributedType node that sets a calling convention.
  /// Valid types should not have multiple attributes with different CCs.
  const AttributedType *getCallingConvAttributedType(QualType T) const;

  /// Check whether a nullability type specifier can be added to the given
  /// type through some means not written in source (e.g. API notes).
  ///
  /// \param type The type to which the nullability specifier will be
  /// added. On success, this type will be updated appropriately.
  ///
  /// \param nullability The nullability specifier to add.
  ///
  /// \param diagLoc The location to use for diagnostics.
  ///
  /// \param allowArrayTypes Whether to accept nullability specifiers on an
  /// array type (e.g., because it will decay to a pointer).
  ///
  /// \param overrideExisting Whether to override an existing, locally-specified
  /// nullability specifier rather than complaining about the conflict.
  ///
  /// \returns true if nullability cannot be applied, false otherwise.
  bool checkImplicitNullabilityTypeSpecifier(QualType &type,
                                             NullabilityKind nullability,
                                             SourceLocation diagLoc,
                                             bool allowArrayTypes,
                                             bool overrideExisting);

  /// Process the attributes before creating an attributed statement, storing
  /// the processed semantic attributes in \p OutAttrs.
  void ProcessStmtAttributes(Stmt *Stmt,
                             const ParsedAttributesWithRange &InAttrs,
                             SmallVectorImpl<const Attr *> &OutAttrs);

  void WarnConflictingTypedMethods(ObjCMethodDecl *Method,
                                   ObjCMethodDecl *MethodDecl,
                                   bool IsProtocolMethodDecl);

  void CheckConflictingOverridingMethod(ObjCMethodDecl *Method,
                                        ObjCMethodDecl *Overridden,
                                        bool IsProtocolMethodDecl);

  /// WarnExactTypedMethods - This routine issues a warning if a method
  /// implementation declaration matches exactly that of its declaration.
  void WarnExactTypedMethods(ObjCMethodDecl *Method,
                             ObjCMethodDecl *MethodDecl,
                             bool IsProtocolMethodDecl);

  typedef llvm::SmallPtrSet<Selector, 8> SelectorSet;

  /// CheckImplementationIvars - This routine checks if the instance variables
  /// listed in the implementation match those listed in the interface.
  void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
                                ObjCIvarDecl **Fields, unsigned nIvars,
                                SourceLocation Loc);

  /// ImplMethodsVsClassMethods - This is the main routine to warn if any
  /// method remains unimplemented in the class or category \@implementation.
  void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl,
                                 ObjCContainerDecl* IDecl,
                                 bool IncompleteImpl = false);

  /// DiagnoseUnimplementedProperties - This routine warns on those properties
  /// which must be implemented by this implementation.
  void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl,
                                       ObjCContainerDecl *CDecl,
                                       bool SynthesizeProperties);

  /// Diagnose any null-resettable synthesized setters.
  void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl);

  /// DefaultSynthesizeProperties - This routine default synthesizes all
  /// properties which must be synthesized in the class's \@implementation.
  void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl,
                                   ObjCInterfaceDecl *IDecl,
                                   SourceLocation AtEnd);
  void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd);

  /// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is
  /// an ivar synthesized for 'Method' and 'Method' is a property accessor
  /// declared in class 'IFace'.
  bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace,
                                      ObjCMethodDecl *Method,
                                      ObjCIvarDecl *IV);
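  // Illustrative sketch (not part of this header): applying a nullability
  // specifier from API notes via checkImplicitNullabilityTypeSpecifier above.
  // The surrounding function and its diagnostic location are hypothetical.
  //
  //   void applyNonnullFromAPINotes(Sema &S, QualType &ParamTy,
  //                                 SourceLocation DiagLoc) {
  //     // Try to mark the parameter type _Nonnull; allow array types since
  //     // they decay to pointers, and don't override an explicit specifier.
  //     if (S.checkImplicitNullabilityTypeSpecifier(
  //             ParamTy, NullabilityKind::NonNull, DiagLoc,
  //             /*allowArrayTypes=*/true, /*overrideExisting=*/false)) {
  //       // A 'true' result means the specifier could not be applied.
  //     }
  //   }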
  /// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if the
  /// ivar which backs the property is not used in the property's accessor.
  void DiagnoseUnusedBackingIvarInAccessor(Scope *S,
                                           const ObjCImplementationDecl *ImplD);

  /// GetIvarBackingPropertyAccessor - If method is a property setter/getter
  /// and its property has a backing ivar, returns this ivar; otherwise,
  /// returns NULL. It also returns ivar's property on success.
  ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method,
                                               const ObjCPropertyDecl *&PDecl) const;

  /// Called by ActOnProperty to handle \@property declarations in
  /// class extensions.
  ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S,
                                                   SourceLocation AtLoc,
                                                   SourceLocation LParenLoc,
                                                   FieldDeclarator &FD,
                                                   Selector GetterSel,
                                                   SourceLocation GetterNameLoc,
                                                   Selector SetterSel,
                                                   SourceLocation SetterNameLoc,
                                                   const bool isReadWrite,
                                                   unsigned &Attributes,
                                                   const unsigned AttributesAsWritten,
                                                   QualType T,
                                                   TypeSourceInfo *TSI,
                                                   tok::ObjCKeywordKind MethodImplKind);

  /// Called by ActOnProperty and HandlePropertyInClassExtension to
  /// handle creating the ObjCPropertyDecl for a category or \@interface.
  ObjCPropertyDecl *CreatePropertyDecl(Scope *S,
                                       ObjCContainerDecl *CDecl,
                                       SourceLocation AtLoc,
                                       SourceLocation LParenLoc,
                                       FieldDeclarator &FD,
                                       Selector GetterSel,
                                       SourceLocation GetterNameLoc,
                                       Selector SetterSel,
                                       SourceLocation SetterNameLoc,
                                       const bool isReadWrite,
                                       const unsigned Attributes,
                                       const unsigned AttributesAsWritten,
                                       QualType T,
                                       TypeSourceInfo *TSI,
                                       tok::ObjCKeywordKind MethodImplKind,
                                       DeclContext *lexicalDC = nullptr);

  /// AtomicPropertySetterGetterRules - This routine enforces the rule (via
  /// warning) when an atomic property has one but not the other user-declared
  /// setter or getter.
  void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl,
                                       ObjCInterfaceDecl* IDecl);

  void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D);

  void DiagnoseMissingDesignatedInitOverrides(
      const ObjCImplementationDecl *ImplD, const ObjCInterfaceDecl *IFD);

  void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID);

  enum MethodMatchStrategy { MMS_loose, MMS_strict };

  /// MatchTwoMethodDeclarations - Checks whether two methods' types match and
  /// returns true or false accordingly.
  bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method,
                                  const ObjCMethodDecl *PrevMethod,
                                  MethodMatchStrategy strategy = MMS_strict);

  /// MatchAllMethodDeclarations - Check methods declared in an interface or
  /// protocol against those declared in their implementations.
  void MatchAllMethodDeclarations(const SelectorSet &InsMap,
                                  const SelectorSet &ClsMap,
                                  SelectorSet &InsMapSeen,
                                  SelectorSet &ClsMapSeen,
                                  ObjCImplDecl* IMPDecl,
                                  ObjCContainerDecl* IDecl,
                                  bool &IncompleteImpl,
                                  bool ImmediateClass,
                                  bool WarnCategoryMethodImpl=false);

  /// CheckCategoryVsClassMethodMatches - Checks that methods implemented in a
  /// category match those implemented in its primary class and warns each
  /// time an exact match is found.
  void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP);

  /// Add the given method to the list of globally-known methods.
  void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);

  /// Returns default addr space for method qualifiers.
  LangAS getDefaultCXXMethodAddrSpace() const;

private:
  /// AddMethodToGlobalPool - Add an instance or factory method to the global
  /// pool. See description of AddInstanceMethodToGlobalPool.
  void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance);

  /// LookupMethodInGlobalPool - Returns the instance or factory method and
  /// optionally warns if there are multiple signatures.
  ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R,
                                           bool receiverIdOrClass,
                                           bool instance);

public:
  /// - Returns instance or factory methods in the global method pool for the
  /// given selector. It checks the desired kind first; if none is found and
  /// the parameter CheckTheOther is set, it then checks the other kind. If no
  /// such method or only one method is found, the function returns false;
  /// otherwise, it returns true.
  bool
  CollectMultipleMethodsInGlobalPool(Selector Sel,
                                     SmallVectorImpl<ObjCMethodDecl*>& Methods,
                                     bool InstanceFirst, bool CheckTheOther,
                                     const ObjCObjectType *TypeBound = nullptr);

  bool
  AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod,
                                 SourceRange R, bool receiverIdOrClass,
                                 SmallVectorImpl<ObjCMethodDecl*>& Methods);

  void
  DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods,
                                     Selector Sel, SourceRange R,
                                     bool receiverIdOrClass);

private:
  /// - Returns the method which best matches the given argument list, or
  /// nullptr if none could be found.
  ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args,
                                   bool IsInstance,
                                   SmallVectorImpl<ObjCMethodDecl*>& Methods);

  /// Record the typo correction failure and return an empty correction.
  TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc,
                                  bool RecordFailure = true) {
    if (RecordFailure)
      TypoCorrectionFailures[Typo].insert(TypoLoc);
    return TypoCorrection();
  }

public:
  /// AddInstanceMethodToGlobalPool - All instance methods in a translation
  /// unit are added to a global pool. This allows us to efficiently associate
  /// a selector with a method declaration for purposes of typechecking
  /// messages sent to "id" (where the class of the object is unknown).
  void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
    AddMethodToGlobalPool(Method, impl, /*instance*/true);
  }

  /// AddFactoryMethodToGlobalPool - Same as above, but for factory methods.
  void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
    AddMethodToGlobalPool(Method, impl, /*instance*/false);
  }

  /// AddAnyMethodToGlobalPool - Add any method, instance or factory to global
  /// pool.
  void AddAnyMethodToGlobalPool(Decl *D);

  /// LookupInstanceMethodInGlobalPool - Returns the method and warns if
  /// there are multiple signatures.
  ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R,
                                                   bool receiverIdOrClass=false) {
    return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
                                    /*instance*/true);
  }

  /// LookupFactoryMethodInGlobalPool - Returns the method and warns if
  /// there are multiple signatures.
  ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R,
                                                  bool receiverIdOrClass=false) {
    return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
                                    /*instance*/false);
  }

  const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel,
                              QualType ObjectType=QualType());

  /// LookupImplementedMethodInGlobalPool - Returns the method which has an
  /// implementation.
  ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel);

  /// CollectIvarsToConstructOrDestruct - Collect those ivars which require
  /// initialization.
  void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI,
                                         SmallVectorImpl<ObjCIvarDecl*> &Ivars);
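  // Illustrative sketch (not part of this header): collecting candidate
  // methods for a selector sent to a receiver of type 'id' using
  // CollectMultipleMethodsInGlobalPool above. The surrounding function is
  // hypothetical.
  //
  //   bool collectCandidates(Sema &S, Selector Sel,
  //                          SmallVectorImpl<ObjCMethodDecl *> &Methods) {
  //     // Prefer instance methods, but fall back to factory methods if no
  //     // instance method matches. Returns true only when multiple methods
  //     // are found.
  //     return S.CollectMultipleMethodsInGlobalPool(Sel, Methods,
  //                                                 /*InstanceFirst=*/true,
  //                                                 /*CheckTheOther=*/true);
  //   }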
  //===--------------------------------------------------------------------===//
  // Statement Parsing Callbacks: SemaStmt.cpp.

public:
  class FullExprArg {
  public:
    FullExprArg() : E(nullptr) { }
    FullExprArg(Sema &actions) : E(nullptr) { }

    ExprResult release() {
      return E;
    }

    Expr *get() const { return E; }

    Expr *operator->() {
      return E;
    }

  private:
    // FIXME: No need to make the entire Sema class a friend when it's just
    // Sema::MakeFullExpr that needs access to the constructor below.
    friend class Sema;

    explicit FullExprArg(Expr *expr) : E(expr) {}

    Expr *E;
  };

  FullExprArg MakeFullExpr(Expr *Arg) {
    return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation());
  }
  FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) {
    return FullExprArg(
        ActOnFinishFullExpr(Arg, CC, /*DiscardedValue*/ false).get());
  }
  FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) {
    ExprResult FE =
        ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(),
                            /*DiscardedValue*/ true);
    return FullExprArg(FE.get());
  }

  StmtResult ActOnExprStmt(ExprResult Arg, bool DiscardedValue = true);
  StmtResult ActOnExprStmtError();

  StmtResult ActOnNullStmt(SourceLocation SemiLoc,
                           bool HasLeadingEmptyMacro = false);

  void ActOnStartOfCompoundStmt(bool IsStmtExpr);
  void ActOnAfterCompoundStatementLeadingPragmas();
  void ActOnFinishOfCompoundStmt();
  StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R,
                               ArrayRef<Stmt *> Elts, bool isStmtExpr);

  /// An RAII object to enter the scope of a compound statement.
  class CompoundScopeRAII {
  public:
    CompoundScopeRAII(Sema &S, bool IsStmtExpr = false) : S(S) {
      S.ActOnStartOfCompoundStmt(IsStmtExpr);
    }

    ~CompoundScopeRAII() {
      S.ActOnFinishOfCompoundStmt();
    }

  private:
    Sema &S;
  };

  /// An RAII helper that pops a function scope on exit.
  struct FunctionScopeRAII {
    Sema &S;
    bool Active;
    FunctionScopeRAII(Sema &S) : S(S), Active(true) {}
    ~FunctionScopeRAII() {
      if (Active)
        S.PopFunctionScopeInfo();
    }
    void disable() { Active = false; }
  };

  StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl, SourceLocation StartLoc,
                           SourceLocation EndLoc);
  void ActOnForEachDeclStmt(DeclGroupPtrTy Decl);
  StmtResult ActOnForEachLValueExpr(Expr *E);
  ExprResult ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val);
  StmtResult ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHS,
                           SourceLocation DotDotDotLoc, ExprResult RHS,
                           SourceLocation ColonLoc);
  void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt);

  StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc,
                              SourceLocation ColonLoc,
                              Stmt *SubStmt, Scope *CurScope);
  StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
                            SourceLocation ColonLoc, Stmt *SubStmt);

  StmtResult BuildAttributedStmt(SourceLocation AttrsLoc,
                                 ArrayRef<const Attr *> Attrs, Stmt *SubStmt);
  StmtResult ActOnAttributedStmt(const ParsedAttributesWithRange &AttrList,
                                 Stmt *SubStmt);

  class ConditionResult;

  StmtResult ActOnIfStmt(SourceLocation IfLoc, IfStatementKind StatementKind,
                         SourceLocation LParenLoc, Stmt *InitStmt,
                         ConditionResult Cond, SourceLocation RParenLoc,
                         Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal);
  StmtResult BuildIfStmt(SourceLocation IfLoc, IfStatementKind StatementKind,
                         SourceLocation LParenLoc, Stmt *InitStmt,
                         ConditionResult Cond, SourceLocation RParenLoc,
                         Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal);
  StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc,
                                    SourceLocation LParenLoc, Stmt *InitStmt,
                                    ConditionResult Cond,
                                    SourceLocation RParenLoc);
  StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch,
                                   Stmt *Body);
  StmtResult ActOnWhileStmt(SourceLocation WhileLoc, SourceLocation LParenLoc,
                            ConditionResult Cond, SourceLocation RParenLoc,
                            Stmt *Body);
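  // Illustrative sketch (not part of this header): producing a FullExprArg
  // via MakeFullExpr above. The surrounding function is hypothetical.
  //
  //   Sema::FullExprArg buildIncrement(Sema &S, Expr *Inc) {
  //     // MakeFullExpr runs ActOnFinishFullExpr, so temporaries created in
  //     // the increment get their cleanups attached; the result is what
  //     // ActOnForStmt expects for the third part of a for statement.
  //     return S.MakeFullExpr(Inc);
  //   }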
  StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body,
                         SourceLocation WhileLoc, SourceLocation CondLParen,
                         Expr *Cond, SourceLocation CondRParen);
  StmtResult ActOnForStmt(SourceLocation ForLoc, SourceLocation LParenLoc,
                          Stmt *First, ConditionResult Second,
                          FullExprArg Third, SourceLocation RParenLoc,
                          Stmt *Body);
  ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc,
                                           Expr *collection);
  StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc,
                                        Stmt *First, Expr *collection,
                                        SourceLocation RParenLoc);
  StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body);

  enum BuildForRangeKind {
    /// Initial building of a for-range statement.
    BFRK_Build,
    /// Instantiation or recovery rebuild of a for-range statement. Don't
    /// attempt any typo-correction.
    BFRK_Rebuild,
    /// Determining whether a for-range statement could be built. Avoid any
    /// unnecessary or irreversible actions.
    BFRK_Check
  };

  StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc,
                                  SourceLocation CoawaitLoc,
                                  Stmt *InitStmt, Stmt *LoopVar,
                                  SourceLocation ColonLoc, Expr *Collection,
                                  SourceLocation RParenLoc,
                                  BuildForRangeKind Kind);
  StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc,
                                  SourceLocation CoawaitLoc,
                                  Stmt *InitStmt, SourceLocation ColonLoc,
                                  Stmt *RangeDecl, Stmt *Begin, Stmt *End,
                                  Expr *Cond, Expr *Inc, Stmt *LoopVarDecl,
                                  SourceLocation RParenLoc,
                                  BuildForRangeKind Kind);
  StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body);

  StmtResult ActOnGotoStmt(SourceLocation GotoLoc, SourceLocation LabelLoc,
                           LabelDecl *TheDecl);
  StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc,
                                   SourceLocation StarLoc, Expr *DestExp);
  StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope);
  StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope);

  void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
                                CapturedRegionKind Kind, unsigned NumParams);
  typedef std::pair<StringRef, QualType> CapturedParamNameType;
  void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
                                CapturedRegionKind Kind,
                                ArrayRef<CapturedParamNameType> Params,
                                unsigned OpenMPCaptureLevel = 0);
  StmtResult ActOnCapturedRegionEnd(Stmt *S);
  void ActOnCapturedRegionError();
  RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD,
                                           SourceLocation Loc,
                                           unsigned NumParams);

  struct NamedReturnInfo {
    const VarDecl *Candidate;

    enum Status : uint8_t { None, MoveEligible, MoveEligibleAndCopyElidable };
    Status S;

    bool isMoveEligible() const { return S != None; }
    bool isCopyElidable() const { return S == MoveEligibleAndCopyElidable; }
  };
  enum class SimplerImplicitMoveMode { ForceOff, Normal, ForceOn };
  NamedReturnInfo getNamedReturnInfo(
      Expr *&E, SimplerImplicitMoveMode Mode = SimplerImplicitMoveMode::Normal);
  NamedReturnInfo getNamedReturnInfo(const VarDecl *VD);
  const VarDecl *getCopyElisionCandidate(NamedReturnInfo &Info,
                                         QualType ReturnType);

  ExprResult
  PerformMoveOrCopyInitialization(const InitializedEntity &Entity,
                                  const NamedReturnInfo &NRInfo, Expr *Value,
                                  bool SuppressSimplerImplicitMoves = false);

  StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
                             Scope *CurScope);
  StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
  StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
                                     NamedReturnInfo &NRInfo,
                                     bool SuppressSimplerImplicitMoves);

  StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
                             bool IsVolatile, unsigned NumOutputs,
                             unsigned NumInputs, IdentifierInfo **Names,
                             MultiExprArg Constraints, MultiExprArg Exprs, Expr
*AsmString, MultiExprArg Clobbers, unsigned NumLabels, SourceLocation RParenLoc); void FillInlineAsmIdentifierInfo(Expr *Res, llvm::InlineAsmIdentifierInfo &Info); ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool IsUnevaluatedContext); bool LookupInlineAsmField(StringRef Base, StringRef Member, unsigned &Offset, SourceLocation AsmLoc); ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member, SourceLocation AsmLoc); StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc, ArrayRef<Token> AsmToks, StringRef AsmString, unsigned NumOutputs, unsigned NumInputs, ArrayRef<StringRef> Constraints, ArrayRef<StringRef> Clobbers, ArrayRef<Expr*> Exprs, SourceLocation EndLoc); LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName, SourceLocation Location, bool AlwaysCreate); VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id, bool Invalid = false); Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D); StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen, Decl *Parm, Stmt *Body); StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body); StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try, MultiStmtArg Catch, Stmt *Finally); StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw); StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw, Scope *CurScope); ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc, Expr *operand); StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc, Expr *SynchExpr, Stmt *SynchBody); StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body); VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id); Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D); StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc, Decl *ExDecl, Stmt *HandlerBlock); StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock, ArrayRef<Stmt *> Handlers); StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ? SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler); StmtResult ActOnSEHExceptBlock(SourceLocation Loc, Expr *FilterExpr, Stmt *Block); void ActOnStartSEHFinallyBlock(); void ActOnAbortSEHFinallyBlock(); StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block); StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope); void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock); bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const; /// If it's a file scoped decl that must warn if not used, keep track /// of it. void MarkUnusedFileScopedDecl(const DeclaratorDecl *D); /// DiagnoseUnusedExprResult - If the statement passed in is an expression /// whose result is unused, warn. void DiagnoseUnusedExprResult(const Stmt *S, unsigned DiagID); void DiagnoseUnusedNestedTypedefs(const RecordDecl *D); void DiagnoseUnusedDecl(const NamedDecl *ND); /// If VD is set but not otherwise used, diagnose, for a parameter or a /// variable. void DiagnoseUnusedButSetDecl(const VarDecl *VD); /// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null /// statement as a \p Body, and it is located on the same line. 
  ///
  /// This helps prevent bugs due to typos, such as:
  ///   if (condition);
  ///     do_stuff();
  void DiagnoseEmptyStmtBody(SourceLocation StmtLoc, const Stmt *Body,
                             unsigned DiagID);

  /// Warn if a for/while loop statement \p S, which is followed by
  /// \p PossibleBody, has a suspicious null statement as a body.
  void DiagnoseEmptyLoopBody(const Stmt *S, const Stmt *PossibleBody);

  /// Warn if a value is moved to itself.
  void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr,
                        SourceLocation OpLoc);

  /// Warn if we're implicitly casting from a _Nullable pointer type to a
  /// _Nonnull one.
  void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType,
                                           SourceLocation Loc);

  /// Warn when implicitly casting 0 to nullptr.
  void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E);

  ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) {
    return DelayedDiagnostics.push(pool);
  }
  void PopParsingDeclaration(ParsingDeclState state, Decl *decl);

  typedef ProcessingContextState ParsingClassState;
  ParsingClassState PushParsingClass() {
    ParsingClassDepth++;
    return DelayedDiagnostics.pushUndelayed();
  }
  void PopParsingClass(ParsingClassState state) {
    ParsingClassDepth--;
    DelayedDiagnostics.popUndelayed(state);
  }

  void redelayDiagnostics(sema::DelayedDiagnosticPool &pool);

  void DiagnoseAvailabilityOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
                                  const ObjCInterfaceDecl *UnknownObjCClass,
                                  bool ObjCPropertyAccess,
                                  bool AvoidPartialAvailabilityChecks = false,
                                  ObjCInterfaceDecl *ClassReceiver = nullptr);

  bool makeUnavailableInSystemHeader(SourceLocation loc,
                                     UnavailableAttr::ImplicitReason reason);

  /// Issue any -Wunguarded-availability warnings in \c FD.
  void DiagnoseUnguardedAvailabilityViolations(Decl *FD);

  void handleDelayedAvailabilityCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);

  //===--------------------------------------------------------------------===//
  // Expression Parsing Callbacks: SemaExpr.cpp.

  bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid);
  bool DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
                         const ObjCInterfaceDecl *UnknownObjCClass = nullptr,
                         bool ObjCPropertyAccess = false,
                         bool AvoidPartialAvailabilityChecks = false,
                         ObjCInterfaceDecl *ClassReceiver = nullptr);
  void NoteDeletedFunction(FunctionDecl *FD);
  void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD);
  bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD,
                                        ObjCMethodDecl *Getter,
                                        SourceLocation Loc);
  void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc,
                             ArrayRef<Expr *> Args);

  void PushExpressionEvaluationContext(
      ExpressionEvaluationContext NewContext,
      Decl *LambdaContextDecl = nullptr,
      ExpressionEvaluationContextRecord::ExpressionKind Type =
          ExpressionEvaluationContextRecord::EK_Other);
  enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl };
  void PushExpressionEvaluationContext(
      ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t,
      ExpressionEvaluationContextRecord::ExpressionKind Type =
          ExpressionEvaluationContextRecord::EK_Other);
  void PopExpressionEvaluationContext();

  void DiscardCleanupsInEvaluationContext();

  ExprResult TransformToPotentiallyEvaluated(Expr *E);
  ExprResult HandleExprEvaluationContextForTypeof(Expr *E);

  ExprResult CheckUnevaluatedOperand(Expr *E);
  void CheckUnusedVolatileAssignment(Expr *E);

  ExprResult ActOnConstantExpression(ExprResult Res);
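  // Illustrative sketch (not part of this header): checking an operand in an
  // unevaluated context using the push/pop interface above. Clang also
  // provides an RAII wrapper (EnterExpressionEvaluationContext) for this
  // pattern; the surrounding function here is hypothetical.
  //
  //   ExprResult checkUnevaluated(Sema &S, Expr *E) {
  //     S.PushExpressionEvaluationContext(
  //         Sema::ExpressionEvaluationContext::Unevaluated);
  //     ExprResult R = S.CheckUnevaluatedOperand(E);
  //     S.PopExpressionEvaluationContext();
  //     return R;
  //   }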
  // Functions for marking a declaration referenced. These functions also
  // contain the relevant logic for marking if a reference to a function or
  // variable is an odr-use (in the C++11 sense). There are separate variants
  // for expressions referring to a decl; these exist because odr-use marking
  // needs to be delayed for some constant variables when we build one of the
  // named expressions.
  //
  // MightBeOdrUse indicates whether the use could possibly be an odr-use, and
  // should usually be true. This only needs to be set to false if the lack of
  // odr-use cannot be determined from the current context (for instance,
  // because the name denotes a virtual function and was written without an
  // explicit nested-name-specifier).
  void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse);
  void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
                              bool MightBeOdrUse = true);
  void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var);
  void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr);
  void MarkMemberReferenced(MemberExpr *E);
  void MarkFunctionParmPackReferenced(FunctionParmPackExpr *E);
  void MarkCaptureUsedInEnclosingContext(VarDecl *Capture, SourceLocation Loc,
                                         unsigned CapturingScopeIndex);

  ExprResult CheckLValueToRValueConversionOperand(Expr *E);
  void CleanupVarDeclMarking();

  enum TryCaptureKind {
    TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef
  };

  /// Try to capture the given variable.
  ///
  /// \param Var The variable to capture.
  ///
  /// \param Loc The location at which the capture occurs.
  ///
  /// \param Kind The kind of capture, which may be implicit (for either a
  /// block or a lambda), or explicit by-value or by-reference (for a lambda).
  ///
  /// \param EllipsisLoc The location of the ellipsis, if one is provided in
  /// an explicit lambda capture.
  ///
  /// \param BuildAndDiagnose Whether we are actually supposed to add the
  /// captures or diagnose errors. If false, this routine merely checks
  /// whether the capture can occur without performing the capture itself or
  /// complaining if the variable cannot be captured.
  ///
  /// \param CaptureType Will be set to the type of the field used to capture
  /// this variable in the innermost block or lambda. Only valid when the
  /// variable can be captured.
  ///
  /// \param DeclRefType Will be set to the type of a reference to the capture
  /// from within the current scope. Only valid when the variable can be
  /// captured.
  ///
  /// \param FunctionScopeIndexToStopAt If non-null, it points to the index
  /// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
  /// This is useful when enclosing lambdas must speculatively capture
  /// variables that may or may not be used in certain specializations of
  /// a nested generic lambda.
  ///
  /// \returns true if an error occurred (i.e., the variable cannot be
  /// captured) and false if the capture succeeded.
  bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc,
                          TryCaptureKind Kind, SourceLocation EllipsisLoc,
                          bool BuildAndDiagnose, QualType &CaptureType,
                          QualType &DeclRefType,
                          const unsigned *const FunctionScopeIndexToStopAt);

  /// Try to capture the given variable.
  bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc,
                          TryCaptureKind Kind = TryCapture_Implicit,
                          SourceLocation EllipsisLoc = SourceLocation());

  /// Checks if the variable must be captured.
  bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc);
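  // Illustrative sketch (not part of this header): probing whether a variable
  // can be captured without committing to the capture, via tryCaptureVariable
  // above. The surrounding function is hypothetical.
  //
  //   bool canCapture(Sema &S, VarDecl *Var, SourceLocation Loc) {
  //     QualType CaptureType, DeclRefType;
  //     // BuildAndDiagnose=false: only check feasibility; neither record the
  //     // capture nor emit diagnostics. The call returns true on failure.
  //     return !S.tryCaptureVariable(Var, Loc, Sema::TryCapture_Implicit,
  //                                  /*EllipsisLoc=*/SourceLocation(),
  //                                  /*BuildAndDiagnose=*/false, CaptureType,
  //                                  DeclRefType,
  //                                  /*FunctionScopeIndexToStopAt=*/nullptr);
  //   }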
  /// Given a variable, determine the type that a reference to that
  /// variable will have in the given scope.
  QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc);

  /// Mark all of the declarations referenced within a particular AST node as
  /// referenced. Used when template instantiation instantiates a non-dependent
  /// type -- entities referenced by the type are now referenced.
  void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T);
  void MarkDeclarationsReferencedInExpr(Expr *E,
                                        bool SkipLocalVariables = false);

  /// Try to recover by turning the given expression into a
  /// call. Returns true if recovery was attempted or an error was
  /// emitted; this may also leave the ExprResult invalid.
  bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD,
                            bool ForceComplain = false,
                            bool (*IsPlausibleResult)(QualType) = nullptr);

  /// Figure out if an expression could be turned into a call.
  bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy,
                     UnresolvedSetImpl &NonTemplateOverloads);

  /// Try to convert an expression \p E to type \p Ty. Returns the result of
  /// the conversion.
  ExprResult tryConvertExprToType(Expr *E, QualType Ty);

  /// Conditionally issue a diagnostic based on the statement's reachability
  /// analysis.
  ///
  /// \param Stmts If Stmts is non-empty, delay reporting the diagnostic until
  /// the function body is parsed, and then do a basic reachability analysis to
  /// determine if the statement is reachable. If it is unreachable, the
  /// diagnostic will not be emitted.
  bool DiagIfReachable(SourceLocation Loc, ArrayRef<const Stmt *> Stmts,
                       const PartialDiagnostic &PD);

  /// Conditionally issue a diagnostic based on the current
  /// evaluation context.
  ///
  /// \param Statement If Statement is non-null, delay reporting the
  /// diagnostic until the function body is parsed, and then do a basic
  /// reachability analysis to determine if the statement is reachable.
  /// If it is unreachable, the diagnostic will not be emitted.
  bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement,
                           const PartialDiagnostic &PD);

  /// Similar, but diagnostic is only produced if all the specified statements
  /// are reachable.
  bool DiagRuntimeBehavior(SourceLocation Loc, ArrayRef<const Stmt*> Stmts,
                           const PartialDiagnostic &PD);

  // Primary Expressions.
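  // Illustrative sketch (not part of this header) for the DiagRuntimeBehavior
  // overloads declared above: the diagnostic ID is hypothetical. Emission is
  // deferred until the reachability of CallStmt is known.
  //
  //   void warnIfReached(Sema &S, SourceLocation Loc, const Stmt *CallStmt) {
  //     S.DiagRuntimeBehavior(Loc, CallStmt,
  //                           S.PDiag(diag::warn_some_runtime_behavior));
  //   }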
SourceRange getExprRange(Expr *E) const; ExprResult ActOnIdExpression( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand, CorrectionCandidateCallback *CCC = nullptr, bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr); void DecomposeUnqualifiedId(const UnqualifiedId &Id, TemplateArgumentListInfo &Buffer, DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *&TemplateArgs); bool DiagnoseDependentMemberLookup(LookupResult &R); bool DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R, CorrectionCandidateCallback &CCC, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr); DeclResult LookupIvarInObjCMethod(LookupResult &Lookup, Scope *S, IdentifierInfo *II); ExprResult BuildIvarRefExpr(Scope *S, SourceLocation Loc, ObjCIvarDecl *IV); ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S, IdentifierInfo *II, bool AllowBuiltinCreation=false); ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, bool isAddressOfOperand, const TemplateArgumentListInfo *TemplateArgs); /// If \p D cannot be odr-used in the current expression evaluation context, /// return a reason explaining why. Otherwise, return NOUR_None. NonOdrUseReason getNonOdrUseReasonInCurrentContext(ValueDecl *D); DeclRefExpr *BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, SourceLocation Loc, const CXXScopeSpec *SS = nullptr); DeclRefExpr * BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, const CXXScopeSpec *SS = nullptr, NamedDecl *FoundD = nullptr, SourceLocation TemplateKWLoc = SourceLocation(), const TemplateArgumentListInfo *TemplateArgs = nullptr); DeclRefExpr * BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, NestedNameSpecifierLoc NNS, NamedDecl *FoundD = nullptr, SourceLocation TemplateKWLoc = SourceLocation(), const TemplateArgumentListInfo *TemplateArgs = nullptr); ExprResult BuildAnonymousStructUnionMemberReference( const CXXScopeSpec &SS, SourceLocation nameLoc, IndirectFieldDecl *indirectField, DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none), Expr *baseObjectExpr = nullptr, SourceLocation opLoc = SourceLocation()); ExprResult BuildPossibleImplicitMemberExpr( const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, UnresolvedLookupExpr *AsULE = nullptr); ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, bool IsDefiniteInstance, const Scope *S); bool UseArgumentDependentLookup(const CXXScopeSpec &SS, const LookupResult &R, bool HasTrailingLParen); ExprResult BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, bool IsAddressOfOperand, const Scope *S, TypeSourceInfo **RecoveryTSI = nullptr); ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS, LookupResult &R, bool NeedsADL, bool AcceptInvalidDecl = false); ExprResult BuildDeclarationNameExpr( const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D, NamedDecl *FoundD = nullptr, const 
TemplateArgumentListInfo *TemplateArgs = nullptr, bool AcceptInvalidDecl = false); ExprResult BuildLiteralOperatorCall(LookupResult &R, DeclarationNameInfo &SuffixInfo, ArrayRef<Expr *> Args, SourceLocation LitEndLoc, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr); ExprResult BuildPredefinedExpr(SourceLocation Loc, PredefinedExpr::IdentKind IK); ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind); ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val); ExprResult BuildSYCLUniqueStableNameExpr(SourceLocation OpLoc, SourceLocation LParen, SourceLocation RParen, TypeSourceInfo *TSI); ExprResult ActOnSYCLUniqueStableNameExpr(SourceLocation OpLoc, SourceLocation LParen, SourceLocation RParen, ParsedType ParsedTy); bool CheckLoopHintExpr(Expr *E, SourceLocation Loc); ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnCharacterConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E); ExprResult ActOnParenListExpr(SourceLocation L, SourceLocation R, MultiExprArg Val); /// ActOnStringLiteral - The specified tokens were lexed as pasted string /// fragments (e.g. "foo" "bar" L"baz"). ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks, Scope *UDLScope = nullptr); ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<ParsedType> ArgTypes, ArrayRef<Expr *> ArgExprs); ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<TypeSourceInfo *> Types, ArrayRef<Expr *> Exprs); // Binary/Unary Operators. 'Tok' is the token for the operator. 
ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *InputExpr); ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *Input); ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Op, Expr *Input); bool isQualifiedMemberAccess(Expr *E); QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc); ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, SourceRange R); ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, bool IsType, void *TyOrEx, SourceRange ArgRange); ExprResult CheckPlaceholderExpr(Expr *E); bool CheckVecStepExpr(Expr *E); bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind); bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc, SourceRange ExprRange, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnSizeofParameterPackExpr(Scope *S, SourceLocation OpLoc, IdentifierInfo &Name, SourceLocation NameLoc, SourceLocation RParenLoc); ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Kind, Expr *Input); ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult CreateBuiltinMatrixSubscriptExpr(Expr *Base, Expr *RowIdx, Expr *ColumnIdx, SourceLocation RBLoc); ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc, Expr *LowerBound, SourceLocation ColonLocFirst, SourceLocation ColonLocSecond, Expr *Length, Expr *Stride, SourceLocation RBLoc); ExprResult ActOnOMPArrayShapingExpr(Expr *Base, SourceLocation LParenLoc, SourceLocation RParenLoc, ArrayRef<Expr *> Dims, ArrayRef<SourceRange> Brackets); /// Data structure for iterator expression. struct OMPIteratorData { IdentifierInfo *DeclIdent = nullptr; SourceLocation DeclIdentLoc; ParsedType Type; OMPIteratorExpr::IteratorRange Range; SourceLocation AssignLoc; SourceLocation ColonLoc; SourceLocation SecColonLoc; }; ExprResult ActOnOMPIteratorExpr(Scope *S, SourceLocation IteratorKwLoc, SourceLocation LLoc, SourceLocation RLoc, ArrayRef<OMPIteratorData> Data); // This struct is for use by ActOnMemberAccess to allow // BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after // changing the access operator from a '.' to a '->' (to see if that is the // change needed to fix an error about an unknown member, e.g. when the class // defines a custom operator->). 
struct ActOnMemberAccessExtraArgs { Scope *S; UnqualifiedId &Id; Decl *ObjCImpDecl; }; ExprResult BuildMemberReferenceExpr( Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, bool SuppressQualifierCheck = false, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, FieldDecl *Field, DeclAccessPair FoundDecl, const DeclarationNameInfo &MemberNameInfo); ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow); bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType, const CXXScopeSpec &SS, const LookupResult &R); ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Member, Decl *ObjCImpDecl); MemberExpr * BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec *SS, SourceLocation TemplateKWLoc, ValueDecl *Member, DeclAccessPair FoundDecl, bool HadMultipleCandidates, const DeclarationNameInfo &MemberNameInfo, QualType Ty, ExprValueKind VK, ExprObjectKind OK, const TemplateArgumentListInfo *TemplateArgs = nullptr); MemberExpr * BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc, NestedNameSpecifierLoc NNS, SourceLocation TemplateKWLoc, ValueDecl *Member, DeclAccessPair FoundDecl, bool HadMultipleCandidates, const DeclarationNameInfo &MemberNameInfo, QualType Ty, ExprValueKind VK, ExprObjectKind OK, const TemplateArgumentListInfo *TemplateArgs = nullptr); void ActOnDefaultCtorInitializers(Decl *CDtorDecl); bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn, FunctionDecl *FDecl, const FunctionProtoType *Proto, ArrayRef<Expr *> Args, SourceLocation RParenLoc, bool ExecConfig = false); void CheckStaticArrayArgument(SourceLocation CallLoc, ParmVarDecl *Param, const Expr *ArgExpr); /// ActOnCallExpr - Handle a call to Fn with the specified array of arguments. /// This provides the location of the left/right parens and a list of comma /// locations. 
  ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
                           MultiExprArg ArgExprs, SourceLocation RParenLoc,
                           Expr *ExecConfig = nullptr);
  ExprResult BuildCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
                           MultiExprArg ArgExprs, SourceLocation RParenLoc,
                           Expr *ExecConfig = nullptr,
                           bool IsExecConfig = false,
                           bool AllowRecovery = false);
  Expr *BuildBuiltinCallExpr(SourceLocation Loc, Builtin::ID Id,
                             MultiExprArg CallArgs);
  enum class AtomicArgumentOrder { API, AST };
  ExprResult
  BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
                  SourceLocation RParenLoc, MultiExprArg Args,
                  AtomicExpr::AtomicOp Op,
                  AtomicArgumentOrder ArgOrder = AtomicArgumentOrder::API);

  ExprResult
  BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation LParenLoc,
                        ArrayRef<Expr *> Arg, SourceLocation RParenLoc,
                        Expr *Config = nullptr, bool IsExecConfig = false,
                        ADLCallKind UsesADL = ADLCallKind::NotADL);

  ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
                                     MultiExprArg ExecConfig,
                                     SourceLocation GGGLoc);

  ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc,
                           Declarator &D, ParsedType &Ty,
                           SourceLocation RParenLoc, Expr *CastExpr);
  ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc,
                                 TypeSourceInfo *Ty,
                                 SourceLocation RParenLoc, Expr *Op);
  CastKind PrepareScalarCast(ExprResult &src, QualType destType);

  /// Build an altivec or OpenCL literal.
  ExprResult BuildVectorLiteral(SourceLocation LParenLoc,
                                SourceLocation RParenLoc, Expr *E,
                                TypeSourceInfo *TInfo);

  ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME);

  ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc,
                                  ParsedType Ty,
                                  SourceLocation RParenLoc,
                                  Expr *InitExpr);

  ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc,
                                      TypeSourceInfo *TInfo,
                                      SourceLocation RParenLoc,
                                      Expr *LiteralExpr);

  ExprResult ActOnInitList(SourceLocation LBraceLoc,
                           MultiExprArg InitArgList,
                           SourceLocation RBraceLoc);

  ExprResult BuildInitList(SourceLocation LBraceLoc,
                           MultiExprArg InitArgList,
                           SourceLocation RBraceLoc);

  ExprResult ActOnDesignatedInitializer(Designation &Desig,
                                        SourceLocation EqualOrColonLoc,
                                        bool GNUSyntax,
                                        ExprResult Init);

private:
  static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind);

public:
  ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc,
                        tok::TokenKind Kind,
                        Expr *LHSExpr, Expr *RHSExpr);
  ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc,
                        BinaryOperatorKind Opc,
                        Expr *LHSExpr, Expr *RHSExpr);
  ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc,
                                Expr *LHSExpr, Expr *RHSExpr);
  void LookupBinOp(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opc,
                   UnresolvedSetImpl &Functions);

  void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc);

  /// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null
  /// in the case of the GNU conditional expr extension.
  ExprResult ActOnConditionalOp(SourceLocation QuestionLoc,
                                SourceLocation ColonLoc,
                                Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr);

  /// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
  ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
                            LabelDecl *TheDecl);

  void ActOnStartStmtExpr();
  ExprResult ActOnStmtExpr(Scope *S, SourceLocation LPLoc, Stmt *SubStmt,
                           SourceLocation RPLoc);
  ExprResult BuildStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
                           SourceLocation RPLoc, unsigned TemplateDepth);
  // Handle the final expression in a statement expression.
ExprResult ActOnStmtExprResult(ExprResult E); void ActOnStmtExprError(); // __builtin_offsetof(type, identifier(.identifier|[expr])*) struct OffsetOfComponent { SourceLocation LocStart, LocEnd; bool isBrackets; // true if [expr], false if .ident union { IdentifierInfo *IdentInfo; Expr *E; } U; }; /// __builtin_offsetof(type, a.b[123][456].c) ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc, TypeSourceInfo *TInfo, ArrayRef<OffsetOfComponent> Components, SourceLocation RParenLoc); ExprResult ActOnBuiltinOffsetOf(Scope *S, SourceLocation BuiltinLoc, SourceLocation TypeLoc, ParsedType ParsedArgTy, ArrayRef<OffsetOfComponent> Components, SourceLocation RParenLoc); // __builtin_choose_expr(constExpr, expr1, expr2) ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc, Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr, SourceLocation RPLoc); // __builtin_va_arg(expr, type) ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty, SourceLocation RPLoc); ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E, TypeSourceInfo *TInfo, SourceLocation RPLoc); // __builtin_LINE(), __builtin_FUNCTION(), __builtin_FILE(), // __builtin_COLUMN() ExprResult ActOnSourceLocExpr(SourceLocExpr::IdentKind Kind, SourceLocation BuiltinLoc, SourceLocation RPLoc); // Build a potentially resolved SourceLocExpr. ExprResult BuildSourceLocExpr(SourceLocExpr::IdentKind Kind, SourceLocation BuiltinLoc, SourceLocation RPLoc, DeclContext *ParentContext); // __null ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc); bool CheckCaseExpression(Expr *E); /// Describes the result of an "if-exists" condition check. enum IfExistsResult { /// The symbol exists. IER_Exists, /// The symbol does not exist. IER_DoesNotExist, /// The name is a dependent name, so the results will differ /// from one instantiation to the next. IER_Dependent, /// An error occurred. IER_Error }; IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS, const DeclarationNameInfo &TargetNameInfo); IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name); StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, NestedNameSpecifierLoc QualifierLoc, DeclarationNameInfo NameInfo, Stmt *Nested); StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name, Stmt *Nested); //===------------------------- "Block" Extension ------------------------===// /// ActOnBlockStart - This callback is invoked when a block literal is /// started. void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockArguments - This callback allows processing of block arguments. /// If there are no arguments, this is still invoked. void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo, Scope *CurScope); /// ActOnBlockError - If there is an error parsing a block, this callback /// is invoked to pop the information about the block from the action impl. void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockStmtExpr - This is called when the body of a block statement /// literal was successfully completed. ^(int x){...} ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body, Scope *CurScope); //===---------------------------- Clang Extensions ----------------------===// /// __builtin_convertvector(...) 
  ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy,
                                    SourceLocation BuiltinLoc,
                                    SourceLocation RParenLoc);

  //===---------------------------- OpenCL Features -----------------------===//

  /// __builtin_astype(...)
  ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy,
                             SourceLocation BuiltinLoc,
                             SourceLocation RParenLoc);
  ExprResult BuildAsTypeExpr(Expr *E, QualType DestTy,
                             SourceLocation BuiltinLoc,
                             SourceLocation RParenLoc);

  //===---------------------------- C++ Features --------------------------===//

  // Act on C++ namespaces
  Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc,
                               SourceLocation NamespaceLoc,
                               SourceLocation IdentLoc, IdentifierInfo *Ident,
                               SourceLocation LBrace,
                               const ParsedAttributesView &AttrList,
                               UsingDirectiveDecl *&UsingDecl);
  void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace);

  NamespaceDecl *getStdNamespace() const;
  NamespaceDecl *getOrCreateStdNamespace();

  NamespaceDecl *lookupStdExperimentalNamespace();

  CXXRecordDecl *getStdBadAlloc() const;
  EnumDecl *getStdAlignValT() const;

private:
  // A cache representing if we've fully checked the various comparison
  // category types stored in ASTContext. The bit-index corresponds to the
  // integer value of a ComparisonCategoryType enumerator.
  llvm::SmallBitVector FullyCheckedComparisonCategories;

  ValueDecl *tryLookupCtorInitMemberDecl(CXXRecordDecl *ClassDecl,
                                         CXXScopeSpec &SS,
                                         ParsedType TemplateTypeTy,
                                         IdentifierInfo *MemberOrBase);

public:
  enum class ComparisonCategoryUsage {
    /// The '<=>' operator was used in an expression and a builtin operator
    /// was selected.
    OperatorInExpression,
    /// A defaulted 'operator<=>' needed the comparison category. This
    /// typically only applies to 'std::strong_ordering', due to the implicit
    /// fallback return value.
    DefaultedOperator,
  };

  /// Lookup the specified comparison category types in the standard
  /// library, and check the VarDecls possibly returned by the operator<=>
  /// builtins for that type.
  ///
  /// \return The type of the comparison category type corresponding to the
  /// specified Kind, or a null type if an error occurs.
  QualType CheckComparisonCategoryType(ComparisonCategoryType Kind,
                                       SourceLocation Loc,
                                       ComparisonCategoryUsage Usage);

  /// Tests whether Ty is an instance of std::initializer_list and, if
  /// it is and Element is not NULL, assigns the element type to Element.
  bool isStdInitializerList(QualType Ty, QualType *Element);

  /// Looks for the std::initializer_list template and instantiates it
  /// with Element, or emits an error if it's not found.
  ///
  /// \returns The instantiated template, or null on error.
  QualType BuildStdInitializerList(QualType Element, SourceLocation Loc);
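  // Illustrative sketch (not part of this header): detecting a
  // std::initializer_list<T> type and recovering its element type via
  // isStdInitializerList above. The surrounding function is hypothetical.
  //
  //   bool elementTypeOfInitList(Sema &S, QualType ParamTy, QualType &Elt) {
  //     QualType Element;
  //     if (!S.isStdInitializerList(ParamTy.getNonReferenceType(), &Element))
  //       return false; // not a std::initializer_list
  //     Elt = Element;
  //     return true;
  //   }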
  /// Determine whether Ctor is an initializer-list constructor, as
  /// defined in [dcl.init.list]p2.
  bool isInitListConstructor(const FunctionDecl *Ctor);

  Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc,
                            SourceLocation NamespcLoc, CXXScopeSpec &SS,
                            SourceLocation IdentLoc,
                            IdentifierInfo *NamespcName,
                            const ParsedAttributesView &AttrList);
  void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir);

  Decl *ActOnNamespaceAliasDef(Scope *CurScope,
                               SourceLocation NamespaceLoc,
                               SourceLocation AliasLoc,
                               IdentifierInfo *Alias,
                               CXXScopeSpec &SS,
                               SourceLocation IdentLoc,
                               IdentifierInfo *Ident);

  void FilterUsingLookup(Scope *S, LookupResult &lookup);
  void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow);
  bool CheckUsingShadowDecl(BaseUsingDecl *BUD, NamedDecl *Target,
                            const LookupResult &PreviousDecls,
                            UsingShadowDecl *&PrevShadow);
  UsingShadowDecl *BuildUsingShadowDecl(Scope *S, BaseUsingDecl *BUD,
                                        NamedDecl *Target,
                                        UsingShadowDecl *PrevDecl);

  bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
                                   bool HasTypenameKeyword,
                                   const CXXScopeSpec &SS,
                                   SourceLocation NameLoc,
                                   const LookupResult &Previous);
  bool CheckUsingDeclQualifier(SourceLocation UsingLoc, bool HasTypename,
                               const CXXScopeSpec &SS,
                               const DeclarationNameInfo &NameInfo,
                               SourceLocation NameLoc,
                               const LookupResult *R = nullptr,
                               const UsingDecl *UD = nullptr);

  NamedDecl *BuildUsingDeclaration(
      Scope *S, AccessSpecifier AS, SourceLocation UsingLoc,
      bool HasTypenameKeyword, SourceLocation TypenameLoc, CXXScopeSpec &SS,
      DeclarationNameInfo NameInfo, SourceLocation EllipsisLoc,
      const ParsedAttributesView &AttrList, bool IsInstantiation,
      bool IsUsingIfExists);
  NamedDecl *BuildUsingEnumDeclaration(Scope *S, AccessSpecifier AS,
                                       SourceLocation UsingLoc,
                                       SourceLocation EnumLoc,
                                       SourceLocation NameLoc, EnumDecl *ED);
  NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom,
                                ArrayRef<NamedDecl *> Expansions);

  bool CheckInheritingConstructorUsingDecl(UsingDecl *UD);

  /// Given a derived-class using shadow declaration for a constructor and the
  /// corresponding base class constructor, find or create the implicitly
  /// synthesized derived class constructor to use for this initialization.
  CXXConstructorDecl *
  findInheritingConstructor(SourceLocation Loc, CXXConstructorDecl *BaseCtor,
                            ConstructorUsingShadowDecl *DerivedShadow);

  Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS,
                              SourceLocation UsingLoc,
                              SourceLocation TypenameLoc, CXXScopeSpec &SS,
                              UnqualifiedId &Name, SourceLocation EllipsisLoc,
                              const ParsedAttributesView &AttrList);
  Decl *ActOnUsingEnumDeclaration(Scope *CurScope, AccessSpecifier AS,
                                  SourceLocation UsingLoc,
                                  SourceLocation EnumLoc, const DeclSpec &);
  Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS,
                              MultiTemplateParamsArg TemplateParams,
                              SourceLocation UsingLoc, UnqualifiedId &Name,
                              const ParsedAttributesView &AttrList,
                              TypeResult Type, Decl *DeclFromDeclSpec);

  /// BuildCXXConstructExpr - Creates a complete call to a constructor,
  /// including handling of its default argument expressions.
  ///
  /// \param ConstructKind - a CXXConstructExpr::ConstructionKind
  ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc,
                                   QualType DeclInitType, NamedDecl *FoundDecl,
                                   CXXConstructorDecl *Constructor,
                                   MultiExprArg Exprs,
                                   bool HadMultipleCandidates,
                                   bool IsListInitialization,
                                   bool IsStdInitListInitialization,
                                   bool RequiresZeroInit,
                                   unsigned ConstructKind,
                                   SourceRange ParenRange);

  /// Build a CXXConstructExpr whose constructor has already been resolved if
  /// it denotes an inherited constructor.
ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, CXXConstructorDecl *Constructor, bool Elidable, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); // FIXME: Can we remove this and have the above BuildCXXConstructExpr check if // the constructor can be elidable? ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, NamedDecl *FoundDecl, CXXConstructorDecl *Constructor, bool Elidable, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field); /// Instantiate or parse a C++ default argument expression as necessary. /// Return true on error. bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD, ParmVarDecl *Param); /// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating /// the default expr if needed. ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD, ParmVarDecl *Param); /// FinalizeVarWithDestructor - Prepare for calling destructor on the /// constructed variable. void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType); /// Helper class that collects exception specifications for /// implicitly-declared special member functions. class ImplicitExceptionSpecification { // Pointer to allow copying Sema *Self; // We order exception specifications thus: // noexcept is the most restrictive, but is only used in C++11. // throw() comes next. // Then a throw(collected exceptions) // Finally no specification, which is expressed as noexcept(false). // throw(...) is used instead if any called function uses it. ExceptionSpecificationType ComputedEST; llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen; SmallVector<QualType, 4> Exceptions; void ClearExceptions() { ExceptionsSeen.clear(); Exceptions.clear(); } public: explicit ImplicitExceptionSpecification(Sema &Self) : Self(&Self), ComputedEST(EST_BasicNoexcept) { if (!Self.getLangOpts().CPlusPlus11) ComputedEST = EST_DynamicNone; } /// Get the computed exception specification type. ExceptionSpecificationType getExceptionSpecType() const { assert(!isComputedNoexcept(ComputedEST) && "noexcept(expr) should not be a possible result"); return ComputedEST; } /// The number of exceptions in the exception specification. unsigned size() const { return Exceptions.size(); } /// The set of exceptions in the exception specification. const QualType *data() const { return Exceptions.data(); } /// Integrate another called method into the collected data. void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method); /// Integrate an invoked expression into the collected data. void CalledExpr(Expr *E) { CalledStmt(E); } /// Integrate an invoked statement into the collected data. void CalledStmt(Stmt *S); /// Overwrite an EPI's exception specification with this /// computed exception specification. 
FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const { FunctionProtoType::ExceptionSpecInfo ESI; ESI.Type = getExceptionSpecType(); if (ESI.Type == EST_Dynamic) { ESI.Exceptions = Exceptions; } else if (ESI.Type == EST_None) { /// C++11 [except.spec]p14: /// The exception-specification is noexcept(false) if the set of /// potential exceptions of the special member function contains "any" ESI.Type = EST_NoexceptFalse; ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(), tok::kw_false).get(); } return ESI; } }; /// Evaluate the implicit exception specification for a defaulted /// special member function. void EvaluateImplicitExceptionSpec(SourceLocation Loc, FunctionDecl *FD); /// Check the given noexcept-specifier, convert its expression, and compute /// the appropriate ExceptionSpecificationType. ExprResult ActOnNoexceptSpec(Expr *NoexceptExpr, ExceptionSpecificationType &EST); /// Check the given exception-specification and update the /// exception specification information with the results. void checkExceptionSpecification(bool IsTopLevel, ExceptionSpecificationType EST, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr, SmallVectorImpl<QualType> &Exceptions, FunctionProtoType::ExceptionSpecInfo &ESI); /// Determine if we're in a case where we need to (incorrectly) eagerly /// parse an exception specification to work around a libstdc++ bug. bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D); /// Add an exception-specification to the given member function /// (or member function template). The exception-specification was parsed /// after the method itself was declared. void actOnDelayedExceptionSpecification(Decl *Method, ExceptionSpecificationType EST, SourceRange SpecificationRange, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr); class InheritedConstructorInfo; /// Determine if a special member function should have a deleted /// definition when it is defaulted. bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM, InheritedConstructorInfo *ICI = nullptr, bool Diagnose = false); /// Produce notes explaining why a defaulted function was defined as deleted. void DiagnoseDeletedDefaultedFunction(FunctionDecl *FD); /// Declare the implicit default constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// default constructor will be added. /// /// \returns The implicitly-declared default constructor. CXXConstructorDecl *DeclareImplicitDefaultConstructor( CXXRecordDecl *ClassDecl); /// DefineImplicitDefaultConstructor - Checks for feasibility of /// defining this constructor as the default constructor. void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit destructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// destructor will be added. /// /// \returns The implicitly-declared destructor. CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl); /// DefineImplicitDestructor - Checks for feasibility of /// defining this destructor as the default destructor. void DefineImplicitDestructor(SourceLocation CurrentLocation, CXXDestructorDecl *Destructor); /// Build an exception spec for destructors that don't have one. /// /// C++11 says that user-defined destructors with no exception spec get one /// that looks as if the destructor was implicitly declared. 
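/// Illustrative snippet (a sketch assuming plain C++11 semantics, not from
/// the original header):
/// \code
///   struct S {
///     ~S() {} // no exception spec written...
///   };        // ...treated as if declared '~S() noexcept'
/// \endcode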
void AdjustDestructorExceptionSpec(CXXDestructorDecl *Destructor); /// Define the specified inheriting constructor. void DefineInheritingConstructor(SourceLocation UseLoc, CXXConstructorDecl *Constructor); /// Declare the implicit copy constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy constructor will be added. /// /// \returns The implicitly-declared copy constructor. CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitCopyConstructor - Checks for feasibility of /// defining this constructor as the copy constructor. void DefineImplicitCopyConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit move constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// move constructor will be added. /// /// \returns The implicitly-declared move constructor, or NULL if it wasn't /// declared. CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitMoveConstructor - Checks for feasibility of /// defining this constructor as the move constructor. void DefineImplicitMoveConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit copy assignment operator for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy assignment operator will be added. /// /// \returns The implicitly-declared copy assignment operator. CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl); /// Defines an implicitly-declared copy assignment operator. void DefineImplicitCopyAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl); /// Declare the implicit move assignment operator for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// move assignment operator will be added. /// /// \returns The implicitly-declared move assignment operator, or NULL if it /// wasn't declared. CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl); /// Defines an implicitly-declared move assignment operator. void DefineImplicitMoveAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl); /// Force the declaration of any implicitly-declared members of this /// class. void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class); /// Check a completed declaration of an implicit special member. void CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD); /// Determine whether the given function is an implicitly-deleted /// special member function. bool isImplicitlyDeleted(FunctionDecl *FD); /// Check whether 'this' shows up in the type of a static member /// function after the (naturally empty) cv-qualifier-seq would be. /// /// \returns true if an error occurred. bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method); /// Whether 'this' shows up in the exception specification of a static /// member function. bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method); /// Check whether 'this' shows up in the attributes of the given /// static member function. /// /// \returns true if an error occurred. bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method); /// MaybeBindToTemporary - If the passed in expression has a record type with /// a non-trivial destructor, this will return CXXBindTemporaryExpr. Otherwise /// it simply returns the passed in expression.
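/// A hypothetical example of the case this handles (assumes <string>; names
/// are illustrative):
/// \code
///   std::string make();
///   make(); // the temporary has a non-trivial destructor, so the call is
///           // wrapped in a CXXBindTemporaryExpr
/// \endcode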
ExprResult MaybeBindToTemporary(Expr *E); /// Wrap the expression in a ConstantExpr if it is a potential immediate /// invocation. ExprResult CheckForImmediateInvocation(ExprResult E, FunctionDecl *Decl); bool CompleteConstructorCall(CXXConstructorDecl *Constructor, QualType DeclInitType, MultiExprArg ArgsPtr, SourceLocation Loc, SmallVectorImpl<Expr *> &ConvertedArgs, bool AllowExplicit = false, bool IsListInitialization = false); ParsedType getInheritingConstructorName(CXXScopeSpec &SS, SourceLocation NameLoc, IdentifierInfo &Name); ParsedType getConstructorName(IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec &SS, bool EnteringContext); ParsedType getDestructorName(SourceLocation TildeLoc, IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec &SS, ParsedType ObjectType, bool EnteringContext); ParsedType getDestructorTypeForDecltype(const DeclSpec &DS, ParsedType ObjectType); // Checks that reinterpret casts don't have undefined behavior. void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType, bool IsDereference, SourceRange Range); // Checks that the vector type should be initialized from a scalar // by splatting the value rather than populating a single element. // This is the case for AltiVecVector types as well as with // AltiVecPixel and AltiVecBool when -faltivec-src-compat=xl is specified. bool ShouldSplatAltivecScalarInCast(const VectorType *VecTy); // Checks if the -faltivec-src-compat=gcc option is specified. // If so, AltiVecVector, AltiVecBool and AltiVecPixel types are // treated the same way as they are when trying to initialize // these vectors on gcc (an error is emitted). bool CheckAltivecInitFromScalar(SourceRange R, QualType VecTy, QualType SrcTy); /// ActOnCXXNamedCast - Parse /// {dynamic,static,reinterpret,const,addrspace}_cast's. ExprResult ActOnCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, SourceLocation LAngleBracketLoc, Declarator &D, SourceLocation RAngleBracketLoc, SourceLocation LParenLoc, Expr *E, SourceLocation RParenLoc); ExprResult BuildCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, TypeSourceInfo *Ty, Expr *E, SourceRange AngleBrackets, SourceRange Parens); ExprResult ActOnBuiltinBitCastExpr(SourceLocation KWLoc, Declarator &Dcl, ExprResult Operand, SourceLocation RParenLoc); ExprResult BuildBuiltinBitCastExpr(SourceLocation KWLoc, TypeSourceInfo *TSI, Expr *Operand, SourceLocation RParenLoc); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXTypeid - Parse typeid( something ). ExprResult ActOnCXXTypeid(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc); ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXUuidof - Parse __uuidof( something ). ExprResult ActOnCXXUuidof(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc); /// Handle a C++1z fold-expression: ( expr op ... op expr ). 
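/// For illustration, a C++17 binary right fold of the form handled here
/// (example only, not from the original header):
/// \code
///   template <typename... Ts>
///   auto sum(Ts... ts) {
///     return (ts + ... + 0); // expands to ts1 + (ts2 + (... + 0))
///   }
/// \endcode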
ExprResult ActOnCXXFoldExpr(Scope *S, SourceLocation LParenLoc, Expr *LHS, tok::TokenKind Operator, SourceLocation EllipsisLoc, Expr *RHS, SourceLocation RParenLoc); ExprResult BuildCXXFoldExpr(UnresolvedLookupExpr *Callee, SourceLocation LParenLoc, Expr *LHS, BinaryOperatorKind Operator, SourceLocation EllipsisLoc, Expr *RHS, SourceLocation RParenLoc, Optional<unsigned> NumExpansions); ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc, BinaryOperatorKind Operator); /// ActOnCXXThis - Parse 'this' pointer. ExprResult ActOnCXXThis(SourceLocation loc); /// Build a CXXThisExpr and mark it referenced in the current context. Expr *BuildCXXThisExpr(SourceLocation Loc, QualType Type, bool IsImplicit); void MarkThisReferenced(CXXThisExpr *This); /// Try to retrieve the type of the 'this' pointer. /// /// \returns The type of 'this', if possible. Otherwise, returns a NULL type. QualType getCurrentThisType(); /// When non-NULL, the C++ 'this' expression is allowed despite the /// current context not being a non-static member function. In such cases, /// this provides the type used for 'this'. QualType CXXThisTypeOverride; /// RAII object used to temporarily allow the C++ 'this' expression /// to be used, with the given qualifiers on the current class type. class CXXThisScopeRAII { Sema &S; QualType OldCXXThisTypeOverride; bool Enabled; public: /// Introduce a new scope where 'this' may be allowed (when enabled), /// using the given declaration (which is either a class template or a /// class) along with the qualifiers placed on '*this'. CXXThisScopeRAII(Sema &S, Decl *ContextDecl, Qualifiers CXXThisTypeQuals, bool Enabled = true); ~CXXThisScopeRAII(); }; /// Make sure the value of 'this' is actually available in the current /// context, if it is a potentially evaluated context. /// /// \param Loc The location at which the capture of 'this' occurs. /// /// \param Explicit Whether 'this' is explicitly captured in a lambda /// capture list. /// /// \param FunctionScopeIndexToStopAt If non-null, it points to the index /// of the FunctionScopeInfo stack beyond which we do not attempt to capture. /// This is useful when enclosing lambdas must speculatively capture /// 'this' that may or may not be used in certain specializations of /// a nested generic lambda (depending on whether the name resolves to /// a non-static member function or a static function). /// \return 'true' on failure, 'false' on success. bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false, bool BuildAndDiagnose = true, const unsigned *const FunctionScopeIndexToStopAt = nullptr, bool ByCopy = false); /// Determine whether the given type is the type of *this that is used /// outside of the body of a member function for a type that is currently /// being defined. bool isThisOutsideMemberFunctionBody(QualType BaseType); /// ActOnCXXBoolLiteral - Parse {true,false} literals. ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind); /// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals. ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind); ExprResult ActOnObjCAvailabilityCheckExpr(llvm::ArrayRef<AvailabilitySpec> AvailSpecs, SourceLocation AtLoc, SourceLocation RParen); /// ActOnCXXNullPtrLiteral - Parse 'nullptr'. ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc); /// ActOnCXXThrow - Parse throw expressions.
ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr); ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex, bool IsThrownVarInScope); bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E); /// ActOnCXXTypeConstructExpr - Parse construction of a specified type. /// Can be interpreted either as function-style casting ("int(x)") /// or class type construction ("ClassType(x,y,z)") /// or creation of a value-initialized type ("int()"). ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep, SourceLocation LParenOrBraceLoc, MultiExprArg Exprs, SourceLocation RParenOrBraceLoc, bool ListInitialization); ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type, SourceLocation LParenLoc, MultiExprArg Exprs, SourceLocation RParenLoc, bool ListInitialization); /// ActOnCXXNew - Parsed a C++ 'new' expression. ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, Declarator &D, Expr *Initializer); ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, QualType AllocType, TypeSourceInfo *AllocTypeInfo, Optional<Expr *> ArraySize, SourceRange DirectInitRange, Expr *Initializer); /// Determine whether \p FD is an aligned allocation or deallocation /// function that is unavailable. bool isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const; /// Produce diagnostics if \p FD is an aligned allocation or deallocation /// function that is unavailable. void diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD, SourceLocation Loc); bool CheckAllocatedType(QualType AllocType, SourceLocation Loc, SourceRange R); /// The scope in which to find allocation functions. enum AllocationFunctionScope { /// Only look for allocation functions in the global scope. AFS_Global, /// Only look for allocation functions in the scope of the /// allocated class. AFS_Class, /// Look for allocation functions in both the global scope /// and in the scope of the allocated class. AFS_Both }; /// Finds the overloads of operator new and delete that are appropriate /// for the allocation. 
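/// Sketch of how the scopes differ (illustrative only; assumes <cstddef>):
/// \code
///   struct T { static void *operator new(std::size_t); };
///   new T;   // class-scope operator new is considered (AFS_Class / AFS_Both)
///   ::new T; // only the global operator new is considered (AFS_Global)
/// \endcode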
bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range, AllocationFunctionScope NewScope, AllocationFunctionScope DeleteScope, QualType AllocType, bool IsArray, bool &PassAlignment, MultiExprArg PlaceArgs, FunctionDecl *&OperatorNew, FunctionDecl *&OperatorDelete, bool Diagnose = true); void DeclareGlobalNewDelete(); void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return, ArrayRef<QualType> Params); bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD, DeclarationName Name, FunctionDecl* &Operator, bool Diagnose = true); FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc, bool CanProvideSize, bool Overaligned, DeclarationName Name); FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc, CXXRecordDecl *RD); /// ActOnCXXDelete - Parsed a C++ 'delete' expression ExprResult ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal, bool ArrayForm, Expr *Operand); void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc, bool IsDelete, bool CallCanBeVirtual, bool WarnOnNonAbstractTypes, SourceLocation DtorLoc); ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen, Expr *Operand, SourceLocation RParen); ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand, SourceLocation RParen); /// Parsed one of the type trait support pseudo-functions. ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<ParsedType> Args, SourceLocation RParenLoc); ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<TypeSourceInfo *> Args, SourceLocation RParenLoc); /// ActOnArrayTypeTrait - Parsed one of the binary type trait support /// pseudo-functions. ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, ParsedType LhsTy, Expr *DimExpr, SourceLocation RParen); ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, TypeSourceInfo *TSInfo, Expr *DimExpr, SourceLocation RParen); /// ActOnExpressionTrait - Parsed one of the unary type trait support /// pseudo-functions. ExprResult ActOnExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult BuildExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult ActOnStartCXXMemberReference(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, ParsedType &ObjectType, bool &MayBePseudoDestructor); ExprResult BuildPseudoDestructorExpr(Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, const CXXScopeSpec &SS, TypeSourceInfo *ScopeType, SourceLocation CCLoc, SourceLocation TildeLoc, PseudoDestructorTypeStorage DestroyedType); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, UnqualifiedId &FirstTypeName, SourceLocation CCLoc, SourceLocation TildeLoc, UnqualifiedId &SecondTypeName); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, SourceLocation TildeLoc, const DeclSpec& DS); /// MaybeCreateExprWithCleanups - If the current full-expression /// requires any cleanups, surround it with a ExprWithCleanups node. /// Otherwise, just returns the passed-in expression. 
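/// A hypothetical case (names are illustrative, not from the original
/// header):
/// \code
///   struct Guard { ~Guard(); };
///   Guard{}; // the full-expression is wrapped in ExprWithCleanups so the
///            // temporary's destructor runs at the end of the statement
/// \endcode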
Expr *MaybeCreateExprWithCleanups(Expr *SubExpr); Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt); ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr); MaterializeTemporaryExpr * CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary, bool BoundToLvalueReference); ExprResult ActOnFinishFullExpr(Expr *Expr, bool DiscardedValue) { return ActOnFinishFullExpr( Expr, Expr ? Expr->getExprLoc() : SourceLocation(), DiscardedValue); } ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC, bool DiscardedValue, bool IsConstexpr = false); StmtResult ActOnFinishFullStmt(Stmt *Stmt); // Marks SS invalid if it represents an incomplete type. bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC); // Complete an enum decl, maybe without a scope spec. bool RequireCompleteEnumDecl(EnumDecl *D, SourceLocation L, CXXScopeSpec *SS = nullptr); DeclContext *computeDeclContext(QualType T); DeclContext *computeDeclContext(const CXXScopeSpec &SS, bool EnteringContext = false); bool isDependentScopeSpecifier(const CXXScopeSpec &SS); CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS); /// The parser has parsed a global nested-name-specifier '::'. /// /// \param CCLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS); /// The parser has parsed a '__super' nested-name-specifier. /// /// \param SuperLoc The location of the '__super' keyword. /// /// \param ColonColonLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc, SourceLocation ColonColonLoc, CXXScopeSpec &SS); bool isAcceptableNestedNameSpecifier(const NamedDecl *SD, bool *CanCorrect = nullptr); NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS); /// Keeps information about an identifier in a nested-name-spec. /// struct NestedNameSpecInfo { /// The type of the object, if we're parsing nested-name-specifier in /// a member access expression. ParsedType ObjectType; /// The identifier preceding the '::'. IdentifierInfo *Identifier; /// The location of the identifier. SourceLocation IdentifierLoc; /// The location of the '::'. SourceLocation CCLoc; /// Creates info object for the most typical case. NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc, SourceLocation ColonColonLoc, ParsedType ObjectType = ParsedType()) : ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) { } NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc, SourceLocation ColonColonLoc, QualType ObjectType) : ObjectType(ParsedType::make(ObjectType)), Identifier(II), IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) { } }; bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, NestedNameSpecInfo &IdInfo); bool BuildCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo, bool EnteringContext, CXXScopeSpec &SS, NamedDecl *ScopeLookupResult, bool ErrorRecoveryLookup, bool *IsCorrectedToColon = nullptr, bool OnlyNamespace = false); /// The parser has parsed a nested-name-specifier 'identifier::'. /// /// \param S The scope in which this nested-name-specifier occurs. 
/// /// \param IdInfo Parser information about an identifier in the /// nested-name-spec. /// /// \param EnteringContext Whether we're entering the context nominated by /// this nested-name-specifier. /// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param ErrorRecoveryLookup If true, then this method is called to improve /// error recovery. In this case do not emit error message. /// /// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':' /// are allowed. The bool value pointed by this parameter is set to 'true' /// if the identifier is treated as if it was followed by ':', not '::'. /// /// \param OnlyNamespace If true, only considers namespaces in lookup. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo, bool EnteringContext, CXXScopeSpec &SS, bool ErrorRecoveryLookup = false, bool *IsCorrectedToColon = nullptr, bool OnlyNamespace = false); ExprResult ActOnDecltypeExpression(Expr *E); bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS, const DeclSpec &DS, SourceLocation ColonColonLoc); bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS, NestedNameSpecInfo &IdInfo, bool EnteringContext); /// The parser has parsed a nested-name-specifier /// 'template[opt] template-name < template-args >::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param TemplateKWLoc the location of the 'template' keyword, if any. /// \param TemplateName the template name. /// \param TemplateNameLoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). /// \param CCLoc The location of the '::'. /// /// \param EnteringContext Whether we're entering the context of the /// nested-name-specifier. /// /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateName, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, SourceLocation CCLoc, bool EnteringContext); /// Given a C++ nested-name-specifier, produce an annotation value /// that the parser can use later to reconstruct the given /// nested-name-specifier. /// /// \param SS A nested-name-specifier. /// /// \returns A pointer containing all of the information in the /// nested-name-specifier \p SS. void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS); /// Given an annotation pointer for a nested-name-specifier, restore /// the nested-name-specifier structure. /// /// \param Annotation The annotation pointer, produced by /// \c SaveNestedNameSpecifierAnnotation(). /// /// \param AnnotationRange The source range corresponding to the annotation. /// /// \param SS The nested-name-specifier that will be updated with the contents /// of the annotation pointer. 
void RestoreNestedNameSpecifierAnnotation(void *Annotation, SourceRange AnnotationRange, CXXScopeSpec &SS); bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global /// scope or nested-name-specifier) is parsed, part of a declarator-id. /// After this method is called, according to [C++ 3.4.3p3], names should be /// looked up in the declarator-id's scope, until the declarator is parsed and /// ActOnCXXExitDeclaratorScope is called. /// The 'SS' should be a non-empty valid CXXScopeSpec. bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS); /// ActOnCXXExitDeclaratorScope - Called when a declarator that previously /// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same /// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well. /// Used to indicate that names should revert to being looked up in the /// defining scope. void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an /// initializer for the declaration 'Dcl'. /// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a /// static data member of class X, names should be looked up in the scope of /// class X. void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl); /// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an /// initializer for the declaration 'Dcl'. void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl); /// Create a new lambda closure type. CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange, TypeSourceInfo *Info, bool KnownDependent, LambdaCaptureDefault CaptureDefault); /// Start the definition of a lambda expression. CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class, SourceRange IntroducerRange, TypeSourceInfo *MethodType, SourceLocation EndLoc, ArrayRef<ParmVarDecl *> Params, ConstexprSpecKind ConstexprKind, Expr *TrailingRequiresClause); /// Number lambda for linkage purposes if necessary. void handleLambdaNumbering( CXXRecordDecl *Class, CXXMethodDecl *Method, Optional<std::tuple<bool, unsigned, unsigned, Decl *>> Mangling = None); /// Endow the lambda scope info with the relevant properties. void buildLambdaScope(sema::LambdaScopeInfo *LSI, CXXMethodDecl *CallOperator, SourceRange IntroducerRange, LambdaCaptureDefault CaptureDefault, SourceLocation CaptureDefaultLoc, bool ExplicitParams, bool ExplicitResultType, bool Mutable); /// Perform initialization analysis of the init-capture and perform /// any implicit conversions such as an lvalue-to-rvalue conversion if /// not being used to initialize a reference. ParsedType actOnLambdaInitCaptureInitialization( SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc, IdentifierInfo *Id, LambdaCaptureInitKind InitKind, Expr *&Init) { return ParsedType::make(buildLambdaInitCaptureInitialization( Loc, ByRef, EllipsisLoc, None, Id, InitKind != LambdaCaptureInitKind::CopyInit, Init)); } QualType buildLambdaInitCaptureInitialization( SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions, IdentifierInfo *Id, bool DirectInit, Expr *&Init); /// Create a dummy variable within the declcontext of the lambda's /// call operator, for name lookup purposes for a lambda init capture. /// /// CodeGen handles emission of lambda captures, ignoring these dummy /// variables appropriately. 
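/// An init-capture of the kind involved here (illustrative sketch, assumes
/// <memory> and C++14):
/// \code
///   auto p = std::make_unique<int>(42);
///   auto fn = [q = std::move(p)] { return *q; }; // 'q' is the init-capture
/// \endcode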
VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc, QualType InitCaptureType, SourceLocation EllipsisLoc, IdentifierInfo *Id, unsigned InitStyle, Expr *Init); /// Add an init-capture to a lambda scope. void addInitCapture(sema::LambdaScopeInfo *LSI, VarDecl *Var); /// Note that we have finished the explicit captures for the /// given lambda. void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI); /// \brief This is called after parsing the explicit template parameter list /// on a lambda (if it exists) in C++2a. void ActOnLambdaExplicitTemplateParameterList(SourceLocation LAngleLoc, ArrayRef<NamedDecl *> TParams, SourceLocation RAngleLoc, ExprResult RequiresClause); /// Introduce the lambda parameters into scope. void addLambdaParameters( ArrayRef<LambdaIntroducer::LambdaCapture> Captures, CXXMethodDecl *CallOperator, Scope *CurScope); /// Deduce a block or lambda's return type based on the return /// statements present in the body. void deduceClosureReturnType(sema::CapturingScopeInfo &CSI); /// ActOnStartOfLambdaDefinition - This is called just before we start /// parsing the body of a lambda; it analyzes the explicit captures and /// arguments, and sets up various data-structures for the body of the /// lambda. void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro, Declarator &ParamInfo, Scope *CurScope); /// ActOnLambdaError - If there is an error parsing a lambda, this callback /// is invoked to pop the information about the lambda. void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope, bool IsInstantiation = false); /// ActOnLambdaExpr - This is called when the body of a lambda expression /// was successfully completed. ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body, Scope *CurScope); /// Does copying/destroying the captured variable have side effects? bool CaptureHasSideEffects(const sema::Capture &From); /// Diagnose if an explicit lambda capture is unused. Returns true if a /// diagnostic is emitted. bool DiagnoseUnusedLambdaCapture(SourceRange CaptureRange, const sema::Capture &From); /// Build a FieldDecl suitable to hold the given capture. FieldDecl *BuildCaptureField(RecordDecl *RD, const sema::Capture &Capture); /// Initialize the given capture with a suitable expression. ExprResult BuildCaptureInit(const sema::Capture &Capture, SourceLocation ImplicitCaptureLoc, bool IsOpenMPMapping = false); /// Complete a lambda-expression having processed and attached the /// lambda body. ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc, sema::LambdaScopeInfo *LSI); /// Get the return type to use for a lambda's conversion function(s) to /// function pointer type, given the type of the call operator. QualType getLambdaConversionFunctionResultType(const FunctionProtoType *CallOpType, CallingConv CC); /// Define the "body" of the conversion from a lambda object to a /// function pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. void DefineImplicitLambdaToFunctionPointerConversion( SourceLocation CurrentLoc, CXXConversionDecl *Conv); /// Define the "body" of the conversion from a lambda object to a /// block pointer. 
/// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc, CXXConversionDecl *Conv); ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation, SourceLocation ConvLocation, CXXConversionDecl *Conv, Expr *Src); /// Check whether the given expression is a valid constraint expression. /// A diagnostic is emitted if it is not, false is returned, and /// PossibleNonPrimary will be set to true if the failure might be due to a /// non-primary expression being used as an atomic constraint. bool CheckConstraintExpression(const Expr *CE, Token NextToken = Token(), bool *PossibleNonPrimary = nullptr, bool IsTrailingRequiresClause = false); private: /// Caches pairs of template-like decls whose associated constraints were /// checked for subsumption and whether or not the first's constraints did in /// fact subsume the second's. llvm::DenseMap<std::pair<NamedDecl *, NamedDecl *>, bool> SubsumptionCache; /// Caches the normalized associated constraints of declarations (concepts or /// constrained declarations). If an error occurred while normalizing the /// associated constraints of the template or concept, nullptr will be cached /// here. llvm::DenseMap<NamedDecl *, NormalizedConstraint *> NormalizationCache; llvm::ContextualFoldingSet<ConstraintSatisfaction, const ASTContext &> SatisfactionCache; public: const NormalizedConstraint * getNormalizedAssociatedConstraints( NamedDecl *ConstrainedDecl, ArrayRef<const Expr *> AssociatedConstraints); /// \brief Check whether the given declaration's associated constraints are /// at least as constrained as another declaration's according to the /// partial ordering of constraints. /// /// \param Result If no error occurred, receives true if D1 is at least as /// constrained as D2, and false otherwise. /// /// \returns true if an error occurred, false otherwise. bool IsAtLeastAsConstrained(NamedDecl *D1, ArrayRef<const Expr *> AC1, NamedDecl *D2, ArrayRef<const Expr *> AC2, bool &Result); /// Emits a diagnostic if D1 was not at least as constrained as D2, but /// would've been if a pair of atomic constraints involved had been declared /// in a concept and not repeated in two separate places in code. /// \returns true if such a diagnostic was emitted, false otherwise. bool MaybeEmitAmbiguousAtomicConstraintsDiagnostic(NamedDecl *D1, ArrayRef<const Expr *> AC1, NamedDecl *D2, ArrayRef<const Expr *> AC2); /// \brief Check whether the given list of constraint expressions are /// satisfied (as if in a 'conjunction') given template arguments. /// \param Template the template-like entity that triggered the constraints /// check (either a concept or a constrained entity). /// \param ConstraintExprs a list of constraint expressions, treated as if /// they were 'AND'ed together. /// \param TemplateArgs the list of template arguments to substitute into the /// constraint expression. /// \param TemplateIDRange The source range of the template id that /// caused the constraints check. /// \param Satisfaction if false is returned, will contain details of the /// satisfaction, with enough information to diagnose an unsatisfied /// expression. /// \returns true if an error occurred and satisfaction could not be checked, /// false otherwise.
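/// For example (illustrative C++20 client code, assumes <concepts>; not from
/// this header), the two conjuncts below are checked as separate constraint
/// expressions:
/// \code
///   template <typename T>
///     requires std::integral<T> && (sizeof(T) > 1)
///   T twice(T v) { return v + v; }
/// \endcode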
bool CheckConstraintSatisfaction( const NamedDecl *Template, ArrayRef<const Expr *> ConstraintExprs, ArrayRef<TemplateArgument> TemplateArgs, SourceRange TemplateIDRange, ConstraintSatisfaction &Satisfaction); /// \brief Check whether the given non-dependent constraint expression is /// satisfied. Returns false and updates Satisfaction with the satisfaction /// verdict if successful, emits a diagnostic and returns true if an error /// occurred and satisfaction could not be determined. /// /// \returns true if an error occurred, false otherwise. bool CheckConstraintSatisfaction(const Expr *ConstraintExpr, ConstraintSatisfaction &Satisfaction); /// Check whether the given function decl's trailing requires clause is /// satisfied, if any. Returns false and updates Satisfaction with the /// satisfaction verdict if successful, emits a diagnostic and returns true if /// an error occurred and satisfaction could not be determined. /// /// \returns true if an error occurred, false otherwise. bool CheckFunctionConstraints(const FunctionDecl *FD, ConstraintSatisfaction &Satisfaction, SourceLocation UsageLoc = SourceLocation()); /// \brief Ensure that the given template arguments satisfy the constraints /// associated with the given template, emitting a diagnostic if they do not. /// /// \param Template The template to which the template arguments are being /// provided. /// /// \param TemplateArgs The converted, canonicalized template arguments. /// /// \param TemplateIDRange The source range of the template id that /// caused the constraints check. /// /// \returns true if the constraints are not satisfied or could not be checked /// for satisfaction, false if the constraints are satisfied. bool EnsureTemplateArgumentListConstraints(TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, SourceRange TemplateIDRange); /// \brief Emit diagnostics explaining why a constraint expression was deemed /// unsatisfied. /// \param First whether this is the first time an unsatisfied constraint is /// diagnosed for this error. void DiagnoseUnsatisfiedConstraint(const ConstraintSatisfaction &Satisfaction, bool First = true); /// \brief Emit diagnostics explaining why a constraint expression was deemed /// unsatisfied. void DiagnoseUnsatisfiedConstraint(const ASTConstraintSatisfaction &Satisfaction, bool First = true); // ParseObjCStringLiteral - Parse Objective-C string literals. ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs, ArrayRef<Expr *> Strings); ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S); /// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the /// numeric literal expression. Type of the expression will be "NSNumber *" /// or "id" if NSNumber is unavailable. ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number); ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc, bool Value); ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements); /// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the /// '@' prefixed parenthesized expression. The type of the expression will /// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type /// of ValueType, which is allowed to be a built-in numeric type, "char *", /// "const char *" or C structure with attribute 'objc_boxable'.
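/// For instance (illustrative Objective-C snippets, not from this header):
/// \code
///   NSNumber *n = @(6 * 7);   // boxed numeric expression
///   NSString *s = @("hello"); // boxed C string
/// \endcode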
ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr); ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr, Expr *IndexExpr, ObjCMethodDecl *getterMethod, ObjCMethodDecl *setterMethod); ExprResult BuildObjCDictionaryLiteral(SourceRange SR, MutableArrayRef<ObjCDictionaryElement> Elements); ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc, TypeSourceInfo *EncodedTypeInfo, SourceLocation RParenLoc); ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl, CXXConversionDecl *Method, bool HadMultipleCandidates); ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc, SourceLocation EncodeLoc, SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc); /// ParseObjCSelectorExpression - Build selector expression for \@selector ExprResult ParseObjCSelectorExpression(Selector Sel, SourceLocation AtLoc, SourceLocation SelLoc, SourceLocation LParenLoc, SourceLocation RParenLoc, bool WarnMultipleSelectors); /// ParseObjCProtocolExpression - Build protocol expression for \@protocol ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName, SourceLocation AtLoc, SourceLocation ProtoLoc, SourceLocation LParenLoc, SourceLocation ProtoIdLoc, SourceLocation RParenLoc); //===--------------------------------------------------------------------===// // C++ Declarations // Decl *ActOnStartLinkageSpecification(Scope *S, SourceLocation ExternLoc, Expr *LangStr, SourceLocation LBraceLoc); Decl *ActOnFinishLinkageSpecification(Scope *S, Decl *LinkageSpec, SourceLocation RBraceLoc); //===--------------------------------------------------------------------===// // C++ Classes // CXXRecordDecl *getCurrentClass(Scope *S, const CXXScopeSpec *SS); bool isCurrentClassName(const IdentifierInfo &II, Scope *S, const CXXScopeSpec *SS = nullptr); bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS); bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc, SourceLocation ColonLoc, const ParsedAttributesView &Attrs); NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D, MultiTemplateParamsArg TemplateParameterLists, Expr *BitfieldWidth, const VirtSpecifiers &VS, InClassInitStyle InitStyle); void ActOnStartCXXInClassMemberInitializer(); void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl, SourceLocation EqualLoc, Expr *Init); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, SourceLocation LParenLoc, ArrayRef<Expr *> Args, SourceLocation RParenLoc, SourceLocation EllipsisLoc); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *InitList, SourceLocation EllipsisLoc); MemInitResult BuildMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *Init, SourceLocation EllipsisLoc); MemInitResult BuildMemberInitializer(ValueDecl *Member, Expr *Init, SourceLocation IdLoc); MemInitResult BuildBaseInitializer(QualType BaseType, TypeSourceInfo *BaseTInfo, Expr *Init, CXXRecordDecl *ClassDecl, SourceLocation EllipsisLoc); MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo, Expr *Init, CXXRecordDecl *ClassDecl); bool SetDelegatingInitializer(CXXConstructorDecl *Constructor, CXXCtorInitializer *Initializer); bool 
SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors, ArrayRef<CXXCtorInitializer *> Initializers = None); void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation); /// MarkBaseAndMemberDestructorsReferenced - Given a record decl, /// mark all the non-trivial destructors of its members and bases as /// referenced. void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc, CXXRecordDecl *Record); /// Mark destructors of virtual bases of this class referenced. In the Itanium /// C++ ABI, this is done when emitting a destructor for any non-abstract /// class. In the Microsoft C++ ABI, this is done any time a class's /// destructor is referenced. void MarkVirtualBaseDestructorsReferenced( SourceLocation Location, CXXRecordDecl *ClassDecl, llvm::SmallPtrSetImpl<const RecordType *> *DirectVirtualBases = nullptr); /// Do semantic checks to allow the complete destructor variant to be emitted /// when the destructor is defined in another translation unit. In the Itanium /// C++ ABI, destructor variants are emitted together. In the MS C++ ABI, they /// can be emitted in separate TUs. To emit the complete variant, run a subset /// of the checks performed when emitting a regular destructor. void CheckCompleteDestructorVariant(SourceLocation CurrentLocation, CXXDestructorDecl *Dtor); /// The list of classes whose vtables have been used within /// this translation unit, and the source locations at which the /// first use occurred. typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse; /// The list of vtables that are required but have not yet been /// materialized. SmallVector<VTableUse, 16> VTableUses; /// The set of classes whose vtables have been used within /// this translation unit, and a bit that will be true if the vtable is /// required to be emitted (otherwise, it should be emitted only if needed /// by code generation). llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed; /// Load any externally-stored vtable uses. void LoadExternalVTableUses(); /// Note that the vtable for the given class was used at the /// given location. void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class, bool DefinitionRequired = false); /// Mark the exception specifications of all virtual member functions /// in the given class as needed. void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc, const CXXRecordDecl *RD); /// MarkVirtualMembersReferenced - Will mark all members of the given /// CXXRecordDecl referenced. void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD, bool ConstexprOnly = false); /// Define all of the vtables that have been used in this /// translation unit and reference any virtual members used by those /// vtables. /// /// \returns true if any work was done, false otherwise. bool DefineUsedVTables(); void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl); void ActOnMemInitializers(Decl *ConstructorDecl, SourceLocation ColonLoc, ArrayRef<CXXCtorInitializer*> MemInits, bool AnyErrors); /// Check class-level dllimport/dllexport attribute. The caller must /// ensure that referenceDLLExportedClassMethods is called at some point later /// when all outer classes of Class are complete.
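/// For example (illustrative, Microsoft C++ ABI syntax):
/// \code
///   struct __declspec(dllexport) S { void f(); }; // class-level attribute
/// \endcode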
void checkClassLevelDLLAttribute(CXXRecordDecl *Class); void checkClassLevelCodeSegAttribute(CXXRecordDecl *Class); void referenceDLLExportedClassMethods(); void propagateDLLAttrToBaseClassTemplate( CXXRecordDecl *Class, Attr *ClassAttr, ClassTemplateSpecializationDecl *BaseTemplateSpec, SourceLocation BaseLoc); /// Add gsl::Pointer attribute to std::container::iterator /// \param ND The declaration that introduces the name /// std::container::iterator. \param UnderlyingRecord The record named by ND. void inferGslPointerAttribute(NamedDecl *ND, CXXRecordDecl *UnderlyingRecord); /// Add [[gsl::Owner]] and [[gsl::Pointer]] attributes for std:: types. void inferGslOwnerPointerAttribute(CXXRecordDecl *Record); /// Add [[gsl::Pointer]] attributes for std:: types. void inferGslPointerAttribute(TypedefNameDecl *TD); void CheckCompletedCXXClass(Scope *S, CXXRecordDecl *Record); /// Check that the C++ class annotated with "trivial_abi" satisfies all the /// conditions that are needed for the attribute to have an effect. void checkIllFormedTrivialABIStruct(CXXRecordDecl &RD); void ActOnFinishCXXMemberSpecification(Scope *S, SourceLocation RLoc, Decl *TagDecl, SourceLocation LBrac, SourceLocation RBrac, const ParsedAttributesView &AttrList); void ActOnFinishCXXMemberDecls(); void ActOnFinishCXXNonNestedClass(); void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param); unsigned ActOnReenterTemplateScope(Decl *Template, llvm::function_ref<Scope *()> EnterScope); void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param); void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnFinishDelayedMemberInitializers(Decl *Record); void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD, CachedTokens &Toks); void UnmarkAsLateParsedTemplate(FunctionDecl *FD); bool IsInsideALocalClassWithinATemplateFunction(); Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, Expr *AssertMessageExpr, SourceLocation RParenLoc); Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, StringLiteral *AssertMessageExpr, SourceLocation RParenLoc, bool Failed); FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart, SourceLocation FriendLoc, TypeSourceInfo *TSInfo); Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS, MultiTemplateParamsArg TemplateParams); NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParams); QualType CheckConstructorDeclarator(Declarator &D, QualType R, StorageClass& SC); void CheckConstructor(CXXConstructorDecl *Constructor); QualType CheckDestructorDeclarator(Declarator &D, QualType R, StorageClass& SC); bool CheckDestructor(CXXDestructorDecl *Destructor); void CheckConversionDeclarator(Declarator &D, QualType &R, StorageClass& SC); Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion); void CheckDeductionGuideDeclarator(Declarator &D, QualType &R, StorageClass &SC); void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD); void CheckExplicitlyDefaultedFunction(Scope *S, FunctionDecl *MD); bool CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM); void CheckDelayedMemberExceptionSpecs(); bool CheckExplicitlyDefaultedComparison(Scope *S, FunctionDecl *MD, DefaultedComparisonKind DCK); void DeclareImplicitEqualityComparison(CXXRecordDecl
*RD, FunctionDecl *Spaceship); void DefineDefaultedComparison(SourceLocation Loc, FunctionDecl *FD, DefaultedComparisonKind DCK); //===--------------------------------------------------------------------===// // C++ Derived Classes // /// ActOnBaseSpecifier - Parsed a base specifier CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class, SourceRange SpecifierRange, bool Virtual, AccessSpecifier Access, TypeSourceInfo *TInfo, SourceLocation EllipsisLoc); BaseResult ActOnBaseSpecifier(Decl *classdecl, SourceRange SpecifierRange, ParsedAttributes &Attrs, bool Virtual, AccessSpecifier Access, ParsedType basetype, SourceLocation BaseLoc, SourceLocation EllipsisLoc); bool AttachBaseSpecifiers(CXXRecordDecl *Class, MutableArrayRef<CXXBaseSpecifier *> Bases); void ActOnBaseSpecifiers(Decl *ClassDecl, MutableArrayRef<CXXBaseSpecifier *> Bases); bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base); bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base, CXXBasePaths &Paths); // FIXME: I don't like this name. void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, SourceLocation Loc, SourceRange Range, CXXCastPath *BasePath = nullptr, bool IgnoreAccess = false); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, unsigned InaccessibleBaseID, unsigned AmbiguousBaseConvID, SourceLocation Loc, SourceRange Range, DeclarationName Name, CXXCastPath *BasePath, bool IgnoreAccess = false); std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths); bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionReturnType - Checks whether the return types are /// covariant, according to C++ [class.virtual]p5. bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionExceptionSpec - Checks whether the exception /// spec is a subset of base spec. bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New, const CXXMethodDecl *Old); bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange); /// CheckOverrideControl - Check C++11 override control semantics. void CheckOverrideControl(NamedDecl *D); /// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was /// not used in the declaration of an overriding method. void DiagnoseAbsenceOfOverrideControl(NamedDecl *D, bool Inconsistent); /// CheckForFunctionMarkedFinal - Checks whether a virtual member function /// overrides a virtual member function marked 'final', according to /// C++11 [class.virtual]p4. 
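/// Illustrative example of the C++11 rule (not from the original header):
/// \code
///   struct B { virtual void f() final; };
///   struct D : B { void f(); }; // error: overrides 'final' function B::f
/// \endcode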
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New, const CXXMethodDecl *Old); //===--------------------------------------------------------------------===// // C++ Access Control // enum AccessResult { AR_accessible, AR_inaccessible, AR_dependent, AR_delayed }; bool SetMemberAccessSpecifier(NamedDecl *MemberDecl, NamedDecl *PrevMemberDecl, AccessSpecifier LexicalAS); AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E, DeclAccessPair FoundDecl); AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E, DeclAccessPair FoundDecl); AccessResult CheckAllocationAccess(SourceLocation OperatorLoc, SourceRange PlacementRange, CXXRecordDecl *NamingClass, DeclAccessPair FoundDecl, bool Diagnose = true); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, DeclAccessPair FoundDecl, const InitializedEntity &Entity, bool IsCopyBindingRefToTemp = false); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, DeclAccessPair FoundDecl, const InitializedEntity &Entity, const PartialDiagnostic &PDiag); AccessResult CheckDestructorAccess(SourceLocation Loc, CXXDestructorDecl *Dtor, const PartialDiagnostic &PDiag, QualType objectType = QualType()); AccessResult CheckFriendAccess(NamedDecl *D); AccessResult CheckMemberAccess(SourceLocation UseLoc, CXXRecordDecl *NamingClass, DeclAccessPair Found); AccessResult CheckStructuredBindingMemberAccess(SourceLocation UseLoc, CXXRecordDecl *DecomposedClass, DeclAccessPair Field); AccessResult CheckMemberOperatorAccess(SourceLocation Loc, Expr *ObjectExpr, Expr *ArgExpr, DeclAccessPair FoundDecl); AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr, DeclAccessPair FoundDecl); AccessResult CheckBaseClassAccess(SourceLocation AccessLoc, QualType Base, QualType Derived, const CXXBasePath &Path, unsigned DiagID, bool ForceCheck = false, bool ForceUnprivileged = false); void CheckLookupAccess(const LookupResult &R); bool IsSimplyAccessible(NamedDecl *Decl, CXXRecordDecl *NamingClass, QualType BaseType); bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass, DeclAccessPair Found, QualType ObjectType, SourceLocation Loc, const PartialDiagnostic &Diag); bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass, DeclAccessPair Found, QualType ObjectType) { return isMemberAccessibleForDeletion(NamingClass, Found, ObjectType, SourceLocation(), PDiag()); } void HandleDependentAccessCheck(const DependentDiagnostic &DD, const MultiLevelTemplateArgumentList &TemplateArgs); void PerformDependentDiagnostics(const DeclContext *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx); /// When true, access checking violations are treated as SFINAE /// failures rather than hard errors. bool AccessCheckingSFINAE; enum AbstractDiagSelID { AbstractNone = -1, AbstractReturnType, AbstractParamType, AbstractVariableType, AbstractFieldType, AbstractIvarType, AbstractSynthesizedIvarType, AbstractArrayType }; bool isAbstractType(SourceLocation Loc, QualType T); bool RequireNonAbstractType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); template <typename... 
Ts> bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireNonAbstractType(Loc, T, Diagnoser); } void DiagnoseAbstractType(const CXXRecordDecl *RD); //===--------------------------------------------------------------------===// // C++ Overloaded Operators [C++ 13.5] // bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl); bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl); //===--------------------------------------------------------------------===// // C++ Templates [C++ 14] // void FilterAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true, bool AllowDependent = true); bool hasAnyAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true, bool AllowDependent = true, bool AllowNonTemplateFunctions = false); /// Try to interpret the lookup result D as a template-name. /// /// \param D A declaration found by name lookup. /// \param AllowFunctionTemplates Whether function templates should be /// considered valid results. /// \param AllowDependent Whether unresolved using declarations (that might /// name templates) should be considered valid results. static NamedDecl *getAsTemplateNameDecl(NamedDecl *D, bool AllowFunctionTemplates = true, bool AllowDependent = true); enum TemplateNameIsRequiredTag { TemplateNameIsRequired }; /// Whether and why a template name is required in this lookup. class RequiredTemplateKind { public: /// Template name is required if TemplateKWLoc is valid. RequiredTemplateKind(SourceLocation TemplateKWLoc = SourceLocation()) : TemplateKW(TemplateKWLoc) {} /// Template name is unconditionally required. RequiredTemplateKind(TemplateNameIsRequiredTag) : TemplateKW() {} SourceLocation getTemplateKeywordLoc() const { return TemplateKW.getValueOr(SourceLocation()); } bool hasTemplateKeyword() const { return getTemplateKeywordLoc().isValid(); } bool isRequired() const { return TemplateKW != SourceLocation(); } explicit operator bool() const { return isRequired(); } private: llvm::Optional<SourceLocation> TemplateKW; }; enum class AssumedTemplateKind { /// This is not assumed to be a template name. None, /// This is assumed to be a template name because lookup found nothing. FoundNothing, /// This is assumed to be a template name because lookup found one or more /// functions (but no function templates). FoundFunctions, }; bool LookupTemplateName( LookupResult &R, Scope *S, CXXScopeSpec &SS, QualType ObjectType, bool EnteringContext, bool &MemberOfUnknownSpecialization, RequiredTemplateKind RequiredTemplate = SourceLocation(), AssumedTemplateKind *ATK = nullptr, bool AllowTypoCorrection = true); TemplateNameKind isTemplateName(Scope *S, CXXScopeSpec &SS, bool hasTemplateKeyword, const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool &MemberOfUnknownSpecialization, bool Disambiguation = false); /// Try to resolve an undeclared template name as a type template. /// /// Sets II to the identifier corresponding to the template name, and updates /// Name to a corresponding (typo-corrected) type template name and TNK to /// the corresponding kind, if possible. 
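/// A hypothetical correction this enables (assuming a type template named
/// 'vector' is visible; the misspelling is illustrative):
/// \code
///   vektor<int> v; // undeclared 'vektor' may be typo-corrected to 'vector'
/// \endcode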
void ActOnUndeclaredTypeTemplateName(Scope *S, TemplateTy &Name, TemplateNameKind &TNK, SourceLocation NameLoc, IdentifierInfo *&II); bool resolveAssumedTemplateNameAsType(Scope *S, TemplateName &Name, SourceLocation NameLoc, bool Diagnose = true); /// Determine whether a particular identifier might be the name in a C++1z /// deduction-guide declaration. bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name, SourceLocation NameLoc, ParsedTemplateTy *Template = nullptr); bool DiagnoseUnknownTemplateName(const IdentifierInfo &II, SourceLocation IILoc, Scope *S, const CXXScopeSpec *SS, TemplateTy &SuggestedTemplate, TemplateNameKind &SuggestedKind); bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation, NamedDecl *Instantiation, bool InstantiatedFromMember, const NamedDecl *Pattern, const NamedDecl *PatternDef, TemplateSpecializationKind TSK, bool Complain = true); void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl); TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl); NamedDecl *ActOnTypeParameter(Scope *S, bool Typename, SourceLocation EllipsisLoc, SourceLocation KeyLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedType DefaultArg, bool HasTypeConstraint); bool ActOnTypeConstraint(const CXXScopeSpec &SS, TemplateIdAnnotation *TypeConstraint, TemplateTypeParmDecl *ConstrainedParameter, SourceLocation EllipsisLoc); bool BuildTypeConstraint(const CXXScopeSpec &SS, TemplateIdAnnotation *TypeConstraint, TemplateTypeParmDecl *ConstrainedParameter, SourceLocation EllipsisLoc, bool AllowUnexpandedPack); bool AttachTypeConstraint(NestedNameSpecifierLoc NS, DeclarationNameInfo NameInfo, ConceptDecl *NamedConcept, const TemplateArgumentListInfo *TemplateArgs, TemplateTypeParmDecl *ConstrainedParameter, SourceLocation EllipsisLoc); bool AttachTypeConstraint(AutoTypeLoc TL, NonTypeTemplateParmDecl *ConstrainedParameter, SourceLocation EllipsisLoc); bool RequireStructuralType(QualType T, SourceLocation Loc); QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI, SourceLocation Loc); QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc); NamedDecl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D, unsigned Depth, unsigned Position, SourceLocation EqualLoc, Expr *DefaultArg); NamedDecl *ActOnTemplateTemplateParameter(Scope *S, SourceLocation TmpLoc, TemplateParameterList *Params, SourceLocation EllipsisLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedTemplateArgument DefaultArg); TemplateParameterList * ActOnTemplateParameterList(unsigned Depth, SourceLocation ExportLoc, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ArrayRef<NamedDecl *> Params, SourceLocation RAngleLoc, Expr *RequiresClause); /// The context in which we are checking a template parameter list. 
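/// For example, \c TPC_FriendFunctionTemplate applies when checking the
/// parameter list of:
/// \code
/// struct X { template<typename T> friend void f(T); };
/// \endcode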
enum TemplateParamListContext { TPC_ClassTemplate, TPC_VarTemplate, TPC_FunctionTemplate, TPC_ClassTemplateMember, TPC_FriendClassTemplate, TPC_FriendFunctionTemplate, TPC_FriendFunctionTemplateDefinition, TPC_TypeAliasTemplate }; bool CheckTemplateParameterList(TemplateParameterList *NewParams, TemplateParameterList *OldParams, TemplateParamListContext TPC, SkipBodyInfo *SkipBody = nullptr); TemplateParameterList *MatchTemplateParametersToScopeSpecifier( SourceLocation DeclStartLoc, SourceLocation DeclLoc, const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId, ArrayRef<TemplateParameterList *> ParamLists, bool IsFriend, bool &IsMemberSpecialization, bool &Invalid, bool SuppressDiagnostic = false); DeclResult CheckClassTemplate( Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, TemplateParameterList *TemplateParams, AccessSpecifier AS, SourceLocation ModulePrivateLoc, SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists, TemplateParameterList **OuterTemplateParamLists, SkipBodyInfo *SkipBody = nullptr); TemplateArgumentLoc getTrivialTemplateArgumentLoc(const TemplateArgument &Arg, QualType NTTPType, SourceLocation Loc); /// Get a template argument mapping the given template parameter to itself, /// e.g. for X in \c template<int X>, this would return an expression template /// argument referencing X. TemplateArgumentLoc getIdentityTemplateArgumentLoc(NamedDecl *Param, SourceLocation Location); void translateTemplateArguments(const ASTTemplateArgsPtr &In, TemplateArgumentListInfo &Out); ParsedTemplateArgument ActOnTemplateTypeArgument(TypeResult ParsedType); void NoteAllFoundTemplates(TemplateName Name); QualType CheckTemplateIdType(TemplateName Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs); TypeResult ActOnTemplateIdType(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy Template, IdentifierInfo *TemplateII, SourceLocation TemplateIILoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, bool IsCtorOrDtorName = false, bool IsClassName = false); /// Parsed an elaborated-type-specifier that refers to a template-id, /// such as \c class T::template apply<U>. TypeResult ActOnTagTemplateIdType(TagUseKind TUK, TypeSpecifierType TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateD, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgsIn, SourceLocation RAngleLoc); DeclResult ActOnVarTemplateSpecialization( Scope *S, Declarator &D, TypeSourceInfo *DI, SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams, StorageClass SC, bool IsPartialSpecialization); /// Get the specialization of the given variable template corresponding to /// the specified argument list, or a null-but-valid result if the arguments /// are dependent. DeclResult CheckVarTemplateId(VarTemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation TemplateNameLoc, const TemplateArgumentListInfo &TemplateArgs); /// Form a reference to the specialization of the given variable template /// corresponding to the specified argument list, or a null-but-valid result /// if the arguments are dependent. 
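/// For example, given:
/// \code
/// template<typename T> constexpr T pi = T(3.141592653589793L);
/// \endcode
/// a reference to 'pi<float>' names a concrete specialization, while a
/// reference to 'pi<U>' inside another template is dependent and yields
/// the null-but-valid result.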
ExprResult CheckVarTemplateId(const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, VarTemplateDecl *Template, SourceLocation TemplateLoc, const TemplateArgumentListInfo *TemplateArgs); ExprResult CheckConceptTemplateId(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &ConceptNameInfo, NamedDecl *FoundDecl, ConceptDecl *NamedConcept, const TemplateArgumentListInfo *TemplateArgs); void diagnoseMissingTemplateArguments(TemplateName Name, SourceLocation Loc); ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, bool RequiresADL, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); TemplateNameKind ActOnTemplateName( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool AllowInjectedClassName = false); DeclResult ActOnClassTemplateSpecialization( Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, SourceLocation ModulePrivateLoc, CXXScopeSpec &SS, TemplateIdAnnotation &TemplateId, const ParsedAttributesView &Attr, MultiTemplateParamsArg TemplateParameterLists, SkipBodyInfo *SkipBody = nullptr); bool CheckTemplatePartialSpecializationArgs(SourceLocation Loc, TemplateDecl *PrimaryTemplate, unsigned NumExplicitArgs, ArrayRef<TemplateArgument> Args); void CheckTemplatePartialSpecialization( ClassTemplatePartialSpecializationDecl *Partial); void CheckTemplatePartialSpecialization( VarTemplatePartialSpecializationDecl *Partial); Decl *ActOnTemplateDeclarator(Scope *S, MultiTemplateParamsArg TemplateParameterLists, Declarator &D); bool CheckSpecializationInstantiationRedecl(SourceLocation NewLoc, TemplateSpecializationKind NewTSK, NamedDecl *PrevDecl, TemplateSpecializationKind PrevTSK, SourceLocation PrevPtOfInstantiation, bool &SuppressNew); bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD, const TemplateArgumentListInfo &ExplicitTemplateArgs, LookupResult &Previous); bool CheckFunctionTemplateSpecialization( FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs, LookupResult &Previous, bool QualifiedFriend = false); bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous); void CompleteMemberSpecialization(NamedDecl *Member, LookupResult &Previous); DeclResult ActOnExplicitInstantiation( Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS, TemplateTy Template, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, const ParsedAttributesView &Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, Declarator &D); TemplateArgumentLoc SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, Decl *Param, SmallVectorImpl<TemplateArgument> &Converted, bool &HasDefaultArg); /// Specifies the context in which a particular template /// argument is being checked. 
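/// For example:
/// \code
/// template<typename T, int N> void f(T (&)[N]);
/// int arr[3];
/// f(arr);      // T is CTAK_Deduced, N is CTAK_DeducedFromArrayBound
/// f<int>(arr); // T is CTAK_Specified
/// \endcode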
enum CheckTemplateArgumentKind {
  /// The template argument was specified in the code or was
  /// instantiated with some deduced template arguments.
  CTAK_Specified,

  /// The template argument was deduced via template argument
  /// deduction.
  CTAK_Deduced,

  /// The template argument was deduced from an array bound
  /// via template argument deduction.
  CTAK_DeducedFromArrayBound
};

bool CheckTemplateArgument(NamedDecl *Param, TemplateArgumentLoc &Arg,
                           NamedDecl *Template, SourceLocation TemplateLoc,
                           SourceLocation RAngleLoc, unsigned ArgumentPackIndex,
                           SmallVectorImpl<TemplateArgument> &Converted,
                           CheckTemplateArgumentKind CTAK = CTAK_Specified);

/// Check that the given template arguments can be provided to
/// the given template, converting the arguments along the way.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateLoc The location of the template name in the source.
///
/// \param TemplateArgs The list of template arguments. If the template is
/// a template template parameter, this function may extend the set of
/// template arguments to also include substituted, defaulted template
/// arguments.
///
/// \param PartialTemplateArgs True if the list of template arguments is
/// intentionally partial, e.g., because we're checking just the initial
/// set of template arguments.
///
/// \param Converted Will receive the converted, canonicalized template
/// arguments.
///
/// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to
/// contain the converted forms of the template arguments as written.
/// Otherwise, \p TemplateArgs will not be modified.
///
/// \param ConstraintsNotSatisfied If provided, and an error occurred, will
/// receive true if the cause for the error is the associated constraints of
/// the template not being satisfied by the template arguments.
///
/// \returns true if an error occurred, false otherwise.
bool CheckTemplateArgumentList(TemplateDecl *Template,
                               SourceLocation TemplateLoc,
                               TemplateArgumentListInfo &TemplateArgs,
                               bool PartialTemplateArgs,
                               SmallVectorImpl<TemplateArgument> &Converted,
                               bool UpdateArgsWithConversions = true,
                               bool *ConstraintsNotSatisfied = nullptr);

bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param,
                               TemplateArgumentLoc &Arg,
                               SmallVectorImpl<TemplateArgument> &Converted);

bool CheckTemplateArgument(TypeSourceInfo *Arg);
ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
                                 QualType InstantiatedParamType, Expr *Arg,
                                 TemplateArgument &Converted,
                                 CheckTemplateArgumentKind CTAK = CTAK_Specified);
bool CheckTemplateTemplateArgument(TemplateTemplateParmDecl *Param,
                                   TemplateParameterList *Params,
                                   TemplateArgumentLoc &Arg);

ExprResult
BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg,
                                        QualType ParamType,
                                        SourceLocation Loc);
ExprResult
BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg,
                                            SourceLocation Loc);

/// Enumeration describing how template parameter lists are compared
/// for equality.
enum TemplateParameterListEqualKind {
  /// We are matching the template parameter lists of two templates
  /// that might be redeclarations.
  ///
  /// \code
  /// template<typename T> struct X;
  /// template<typename T> struct X;
  /// \endcode
  TPL_TemplateMatch,

  /// We are matching the template parameter lists of two template
  /// template parameters as part of matching the template parameter lists
  /// of two templates that might be redeclarations.
/// /// \code /// template<template<int I> class TT> struct X; /// template<template<int Value> class Other> struct X; /// \endcode TPL_TemplateTemplateParmMatch, /// We are matching the template parameter lists of a template /// template argument against the template parameter lists of a template /// template parameter. /// /// \code /// template<template<int Value> class Metafun> struct X; /// template<int Value> struct integer_c; /// X<integer_c> xic; /// \endcode TPL_TemplateTemplateArgumentMatch }; bool TemplateParameterListsAreEqual(TemplateParameterList *New, TemplateParameterList *Old, bool Complain, TemplateParameterListEqualKind Kind, SourceLocation TemplateArgLoc = SourceLocation()); bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams); /// Called when the parser has parsed a C++ typename /// specifier, e.g., "typename T::type". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param II the identifier we're retrieving (e.g., 'type' in the example). /// \param IdLoc the location of the identifier. TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, const IdentifierInfo &II, SourceLocation IdLoc); /// Called when the parser has parsed a C++ typename /// specifier that ends in a template-id, e.g., /// "typename MetaFun::template apply<T1, T2>". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param TemplateLoc the location of the 'template' keyword, if any. /// \param TemplateName The template name. /// \param TemplateII The identifier used to name the template. /// \param TemplateIILoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). 
TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, SourceLocation TemplateLoc, TemplateTy TemplateName, IdentifierInfo *TemplateII, SourceLocation TemplateIILoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc); QualType CheckTypenameType(ElaboratedTypeKeyword Keyword, SourceLocation KeywordLoc, NestedNameSpecifierLoc QualifierLoc, const IdentifierInfo &II, SourceLocation IILoc, TypeSourceInfo **TSI, bool DeducedTSTContext); QualType CheckTypenameType(ElaboratedTypeKeyword Keyword, SourceLocation KeywordLoc, NestedNameSpecifierLoc QualifierLoc, const IdentifierInfo &II, SourceLocation IILoc, bool DeducedTSTContext = true); TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T, SourceLocation Loc, DeclarationName Name); bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS); ExprResult RebuildExprInCurrentInstantiation(Expr *E); bool RebuildTemplateParamsInCurrentInstantiation( TemplateParameterList *Params); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgumentList &Args); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgument *Args, unsigned NumArgs); //===--------------------------------------------------------------------===// // C++ Concepts //===--------------------------------------------------------------------===// Decl *ActOnConceptDefinition( Scope *S, MultiTemplateParamsArg TemplateParameterLists, IdentifierInfo *Name, SourceLocation NameLoc, Expr *ConstraintExpr); RequiresExprBodyDecl * ActOnStartRequiresExpr(SourceLocation RequiresKWLoc, ArrayRef<ParmVarDecl *> LocalParameters, Scope *BodyScope); void ActOnFinishRequiresExpr(); concepts::Requirement *ActOnSimpleRequirement(Expr *E); concepts::Requirement *ActOnTypeRequirement( SourceLocation TypenameKWLoc, CXXScopeSpec &SS, SourceLocation NameLoc, IdentifierInfo *TypeName, TemplateIdAnnotation *TemplateId); concepts::Requirement *ActOnCompoundRequirement(Expr *E, SourceLocation NoexceptLoc); concepts::Requirement * ActOnCompoundRequirement( Expr *E, SourceLocation NoexceptLoc, CXXScopeSpec &SS, TemplateIdAnnotation *TypeConstraint, unsigned Depth); concepts::Requirement *ActOnNestedRequirement(Expr *Constraint); concepts::ExprRequirement * BuildExprRequirement( Expr *E, bool IsSatisfied, SourceLocation NoexceptLoc, concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement); concepts::ExprRequirement * BuildExprRequirement( concepts::Requirement::SubstitutionDiagnostic *ExprSubstDiag, bool IsSatisfied, SourceLocation NoexceptLoc, concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement); concepts::TypeRequirement *BuildTypeRequirement(TypeSourceInfo *Type); concepts::TypeRequirement * BuildTypeRequirement( concepts::Requirement::SubstitutionDiagnostic *SubstDiag); concepts::NestedRequirement *BuildNestedRequirement(Expr *E); concepts::NestedRequirement * BuildNestedRequirement( concepts::Requirement::SubstitutionDiagnostic *SubstDiag); ExprResult ActOnRequiresExpr(SourceLocation RequiresKWLoc, RequiresExprBodyDecl *Body, ArrayRef<ParmVarDecl *> LocalParameters, ArrayRef<concepts::Requirement *> Requirements, SourceLocation ClosingBraceLoc); //===--------------------------------------------------------------------===// // C++ Variadic Templates (C++0x [temp.variadic]) //===--------------------------------------------------------------------===// /// Determine whether an unexpanded parameter pack 
might be permitted in this
/// location. Useful for error recovery.
bool isUnexpandedParameterPackPermitted();

/// The context in which an unexpanded parameter pack is
/// being diagnosed.
///
/// Note that the values of this enumeration line up with the first
/// argument to the \c err_unexpanded_parameter_pack diagnostic.
enum UnexpandedParameterPackContext {
  /// An arbitrary expression.
  UPPC_Expression = 0,

  /// The base type of a class type.
  UPPC_BaseType,

  /// The type of an arbitrary declaration.
  UPPC_DeclarationType,

  /// The type of a data member.
  UPPC_DataMemberType,

  /// The size of a bit-field.
  UPPC_BitFieldWidth,

  /// The expression in a static assertion.
  UPPC_StaticAssertExpression,

  /// The fixed underlying type of an enumeration.
  UPPC_FixedUnderlyingType,

  /// The enumerator value.
  UPPC_EnumeratorValue,

  /// A using declaration.
  UPPC_UsingDeclaration,

  /// A friend declaration.
  UPPC_FriendDeclaration,

  /// A declaration qualifier.
  UPPC_DeclarationQualifier,

  /// An initializer.
  UPPC_Initializer,

  /// A default argument.
  UPPC_DefaultArgument,

  /// The type of a non-type template parameter.
  UPPC_NonTypeTemplateParameterType,

  /// The type of an exception.
  UPPC_ExceptionType,

  /// Partial specialization.
  UPPC_PartialSpecialization,

  /// Microsoft __if_exists.
  UPPC_IfExists,

  /// Microsoft __if_not_exists.
  UPPC_IfNotExists,

  /// Lambda expression.
  UPPC_Lambda,

  /// Block expression.
  UPPC_Block,

  /// A type constraint.
  UPPC_TypeConstraint,

  /// A requirement in a requires-expression.
  UPPC_Requirement,

  /// A requires-clause.
  UPPC_RequiresClause,
};

/// Diagnose unexpanded parameter packs.
///
/// \param Loc The location at which we should emit the diagnostic.
///
/// \param UPPC The context in which we are diagnosing unexpanded
/// parameter packs.
///
/// \param Unexpanded The set of unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc,
                                      UnexpandedParameterPackContext UPPC,
                                      ArrayRef<UnexpandedParameterPack> Unexpanded);

/// If the given type contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The source location where a diagnostic should be emitted.
///
/// \param T The type that is being checked for unexpanded parameter
/// packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T,
                                     UnexpandedParameterPackContext UPPC);

/// If the given expression contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param E The expression that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(Expr *E,
                                     UnexpandedParameterPackContext UPPC = UPPC_Expression);

/// If the given requires-expression contains an unexpanded reference to one
/// of its own parameter packs, diagnose the error.
///
/// \param RE The requires-expression that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPackInRequiresExpr(RequiresExpr *RE);

/// If the given nested-name-specifier contains an unexpanded
/// parameter pack, diagnose the error.
///
/// \param SS The nested-name-specifier that is being checked for
/// unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
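/// For example, 'Ts' is unexpanded within the nested-name-specifier here:
/// \code
/// template<typename ...Ts> void f() { typename Ts::type x; }
/// \endcode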
bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS, UnexpandedParameterPackContext UPPC); /// If the given name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param NameInfo The name (with source location information) that /// is being checked for unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo, UnexpandedParameterPackContext UPPC); /// If the given template name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The location of the template name. /// /// \param Template The template name that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TemplateName Template, UnexpandedParameterPackContext UPPC); /// If the given template argument contains an unexpanded parameter /// pack, diagnose the error. /// /// \param Arg The template argument that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg, UnexpandedParameterPackContext UPPC); /// Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgument Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// type. /// /// \param T The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(QualType T, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// type. /// /// \param TL The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TypeLoc TL, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// nested-name-specifier. /// /// \param NNS The nested-name-specifier that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(NestedNameSpecifierLoc NNS, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// name. /// /// \param NameInfo The name that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Invoked when parsing a template argument followed by an /// ellipsis, which creates a pack expansion. /// /// \param Arg The template argument preceding the ellipsis, which /// may already be invalid. /// /// \param EllipsisLoc The location of the ellipsis. ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg, SourceLocation EllipsisLoc); /// Invoked when parsing a type followed by an ellipsis, which /// creates a pack expansion. 
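/// For example, each base specifier below is such a pack expansion:
/// \code
/// template<typename ...Mixins> struct X : Mixins... { };
/// \endcode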
/// /// \param Type The type preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc); /// Construct a pack expansion type from the pattern of the pack /// expansion. TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Construct a pack expansion type from the pattern of the pack /// expansion. QualType CheckPackExpansion(QualType Pattern, SourceRange PatternRange, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc); /// Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Determine whether we could expand a pack expansion with the /// given set of parameter packs into separate arguments by repeatedly /// transforming the pattern. /// /// \param EllipsisLoc The location of the ellipsis that identifies the /// pack expansion. /// /// \param PatternRange The source range that covers the entire pattern of /// the pack expansion. /// /// \param Unexpanded The set of unexpanded parameter packs within the /// pattern. /// /// \param ShouldExpand Will be set to \c true if the transformer should /// expand the corresponding pack expansions into separate arguments. When /// set, \c NumExpansions must also be set. /// /// \param RetainExpansion Whether the caller should add an unexpanded /// pack expansion after all of the expanded arguments. This is used /// when extending explicitly-specified template argument packs per /// C++0x [temp.arg.explicit]p9. /// /// \param NumExpansions The number of separate arguments that will be in /// the expanded form of the corresponding pack expansion. This is both an /// input and an output parameter, which can be set by the caller if the /// number of expansions is known a priori (e.g., due to a prior substitution) /// and will be set by the callee when the number of expansions is known. /// The callee must set this value when \c ShouldExpand is \c true; it may /// set this value in other cases. /// /// \returns true if an error occurred (e.g., because the parameter packs /// are to be instantiated with arguments of different lengths), false /// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions) /// must be set. bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc, SourceRange PatternRange, ArrayRef<UnexpandedParameterPack> Unexpanded, const MultiLevelTemplateArgumentList &TemplateArgs, bool &ShouldExpand, bool &RetainExpansion, Optional<unsigned> &NumExpansions); /// Determine the number of arguments in the given pack expansion /// type. /// /// This routine assumes that the number of arguments in the expansion is /// consistent across all of the unexpanded parameter packs in its pattern. 
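/// For example, with 'Ts' bound to {int, float}:
/// \code
/// std::tuple<Ts...> // this expansion produces two arguments
/// \endcode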
/// /// Returns an empty Optional if the type can't be expanded. Optional<unsigned> getNumArgumentsInExpansion(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs); /// Determine whether the given declarator contains any unexpanded /// parameter packs. /// /// This routine is used by the parser to disambiguate function declarators /// with an ellipsis prior to the ')', e.g., /// /// \code /// void f(T...); /// \endcode /// /// To determine whether we have an (unnamed) function parameter pack or /// a variadic function. /// /// \returns true if the declarator contains any unexpanded parameter packs, /// false otherwise. bool containsUnexpandedParameterPacks(Declarator &D); /// Returns the pattern of the pack expansion for a template argument. /// /// \param OrigLoc The template argument to expand. /// /// \param Ellipsis Will be set to the location of the ellipsis. /// /// \param NumExpansions Will be set to the number of expansions that will /// be generated from this pack expansion, if known a priori. TemplateArgumentLoc getTemplateArgumentPackExpansionPattern( TemplateArgumentLoc OrigLoc, SourceLocation &Ellipsis, Optional<unsigned> &NumExpansions) const; /// Given a template argument that contains an unexpanded parameter pack, but /// which has already been substituted, attempt to determine the number of /// elements that will be produced once this argument is fully-expanded. /// /// This is intended for use when transforming 'sizeof...(Arg)' in order to /// avoid actually expanding the pack where possible. Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg); //===--------------------------------------------------------------------===// // C++ Template Argument Deduction (C++ [temp.deduct]) //===--------------------------------------------------------------------===// /// Adjust the type \p ArgFunctionType to match the calling convention, /// noreturn, and optionally the exception specification of \p FunctionType. /// Deduction often wants to ignore these properties when matching function /// types. QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType, bool AdjustExceptionSpec = false); /// Describes the result of template argument deduction. /// /// The TemplateDeductionResult enumeration describes the result of /// template argument deduction, as returned from /// DeduceTemplateArguments(). The separate TemplateDeductionInfo /// structure provides additional information about the results of /// template argument deduction, e.g., the deduced template argument /// list (if successful) or the specific template parameters or /// deduced arguments that were involved in the failure. enum TemplateDeductionResult { /// Template argument deduction was successful. TDK_Success = 0, /// The declaration was invalid; do nothing. TDK_Invalid, /// Template argument deduction exceeded the maximum template /// instantiation depth (which has already been diagnosed). TDK_InstantiationDepth, /// Template argument deduction did not deduce a value /// for every template parameter. TDK_Incomplete, /// Template argument deduction did not deduce a value for every /// expansion of an expanded template parameter pack. TDK_IncompletePack, /// Template argument deduction produced inconsistent /// deduced values for the given template parameter. 
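/// For example:
/// \code
/// template<typename T> void f(T, T);
/// f(1, 2.0); // T is deduced as both 'int' and 'double'
/// \endcode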
TDK_Inconsistent,
/// Template argument deduction failed due to inconsistent
/// cv-qualifiers on a template parameter type that would
/// otherwise be deduced, e.g., we tried to deduce T in "const T"
/// but were given a non-const "X".
TDK_Underqualified,
/// Substitution of the deduced template argument values
/// resulted in an error.
TDK_SubstitutionFailure,
/// After substituting deduced template arguments, a dependent
/// parameter type did not match the corresponding argument.
TDK_DeducedMismatch,
/// After substituting deduced template arguments, an element of
/// a dependent parameter type did not match the corresponding element
/// of the corresponding argument (when deducing from an initializer list).
TDK_DeducedMismatchNested,
/// A non-dependent component of the parameter did not match the
/// corresponding component of the argument.
TDK_NonDeducedMismatch,
/// When performing template argument deduction for a function
/// template, there were too many call arguments.
TDK_TooManyArguments,
/// When performing template argument deduction for a function
/// template, there were too few call arguments.
TDK_TooFewArguments,
/// The explicitly-specified template arguments were not valid
/// template arguments for the given template.
TDK_InvalidExplicitArguments,
/// Checking non-dependent argument conversions failed.
TDK_NonDependentConversionFailure,
/// The deduced arguments did not satisfy the constraints associated
/// with the template.
TDK_ConstraintsNotSatisfied,
/// Deduction failed; that's all we know.
TDK_MiscellaneousDeductionFailure,
/// CUDA Target attributes do not match.
TDK_CUDATargetMismatch
};

TemplateDeductionResult
DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
                        const TemplateArgumentList &TemplateArgs,
                        sema::TemplateDeductionInfo &Info);

TemplateDeductionResult
DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial,
                        const TemplateArgumentList &TemplateArgs,
                        sema::TemplateDeductionInfo &Info);

TemplateDeductionResult SubstituteExplicitTemplateArguments(
    FunctionTemplateDecl *FunctionTemplate,
    TemplateArgumentListInfo &ExplicitTemplateArgs,
    SmallVectorImpl<DeducedTemplateArgument> &Deduced,
    SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType,
    sema::TemplateDeductionInfo &Info);

/// \brief A function argument from which we performed template argument
/// deduction for a call.
struct OriginalCallArg { OriginalCallArg(QualType OriginalParamType, bool DecomposedParam, unsigned ArgIdx, QualType OriginalArgType) : OriginalParamType(OriginalParamType), DecomposedParam(DecomposedParam), ArgIdx(ArgIdx), OriginalArgType(OriginalArgType) {} QualType OriginalParamType; bool DecomposedParam; unsigned ArgIdx; QualType OriginalArgType; }; TemplateDeductionResult FinishTemplateArgumentDeduction( FunctionTemplateDecl *FunctionTemplate, SmallVectorImpl<DeducedTemplateArgument> &Deduced, unsigned NumExplicitlySpecified, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr, bool PartialOverloading = false, llvm::function_ref<bool()> CheckNonDependent = []{ return false; }); TemplateDeductionResult DeduceTemplateArguments( FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool PartialOverloading, llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ArgFunctionType, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool IsAddressOfFunction = false); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, QualType ToType, CXXConversionDecl *&Specialization, sema::TemplateDeductionInfo &Info); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool IsAddressOfFunction = false); /// Substitute Replacement for \p auto in \p TypeWithAuto QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement); /// Substitute Replacement for auto in TypeWithAuto TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto, QualType Replacement); /// Completely replace the \c auto in \p TypeWithAuto by /// \p Replacement. This does not retain any \c auto type sugar. QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement); TypeSourceInfo *ReplaceAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto, QualType Replacement); /// Result type of DeduceAutoType. enum DeduceAutoResult { DAR_Succeeded, DAR_Failed, DAR_FailedAlreadyDiagnosed }; DeduceAutoResult DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result, Optional<unsigned> DependentDeductionDepth = None, bool IgnoreConstraints = false); DeduceAutoResult DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result, Optional<unsigned> DependentDeductionDepth = None, bool IgnoreConstraints = false); void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init); bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc, bool Diagnose = true); /// Declare implicit deduction guides for a class template if we've /// not already done so. 
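/// For example, the guides implicitly declared for 'std::pair' allow:
/// \code
/// std::pair p(1, 2.0); // deduces std::pair<int, double>
/// \endcode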
void DeclareImplicitDeductionGuides(TemplateDecl *Template, SourceLocation Loc); QualType DeduceTemplateSpecializationFromInitializer( TypeSourceInfo *TInfo, const InitializedEntity &Entity, const InitializationKind &Kind, MultiExprArg Init); QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name, QualType Type, TypeSourceInfo *TSI, SourceRange Range, bool DirectInit, Expr *Init); TypeLoc getReturnTypeLoc(FunctionDecl *FD) const; bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD, SourceLocation ReturnLoc, Expr *&RetExpr, AutoType *AT); FunctionTemplateDecl *getMoreSpecializedTemplate( FunctionTemplateDecl *FT1, FunctionTemplateDecl *FT2, SourceLocation Loc, TemplatePartialOrderingContext TPOC, unsigned NumCallArguments1, unsigned NumCallArguments2, bool Reversed = false); UnresolvedSetIterator getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd, TemplateSpecCandidateSet &FailedCandidates, SourceLocation Loc, const PartialDiagnostic &NoneDiag, const PartialDiagnostic &AmbigDiag, const PartialDiagnostic &CandidateDiag, bool Complain = true, QualType TargetType = QualType()); ClassTemplatePartialSpecializationDecl * getMoreSpecializedPartialSpecialization( ClassTemplatePartialSpecializationDecl *PS1, ClassTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T, sema::TemplateDeductionInfo &Info); VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization( VarTemplatePartialSpecializationDecl *PS1, VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T, sema::TemplateDeductionInfo &Info); bool isTemplateTemplateParameterAtLeastAsSpecializedAs( TemplateParameterList *PParam, TemplateDecl *AArg, SourceLocation Loc); void MarkUsedTemplateParameters(const Expr *E, bool OnlyDeduced, unsigned Depth, llvm::SmallBitVector &Used); void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs, bool OnlyDeduced, unsigned Depth, llvm::SmallBitVector &Used); void MarkDeducedTemplateParameters( const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced) { return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced); } static void MarkDeducedTemplateParameters(ASTContext &Ctx, const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced); //===--------------------------------------------------------------------===// // C++ Template Instantiation // MultiLevelTemplateArgumentList getTemplateInstantiationArgs(NamedDecl *D, const TemplateArgumentList *Innermost = nullptr, bool RelativeToPrimary = false, const FunctionDecl *Pattern = nullptr); /// A context in which code is being synthesized (where a source location /// alone is not sufficient to identify the context). This covers template /// instantiation and various forms of implicitly-generated functions. struct CodeSynthesisContext { /// The kind of template instantiation we are performing enum SynthesisKind { /// We are instantiating a template declaration. The entity is /// the declaration we're instantiating (e.g., a CXXRecordDecl). TemplateInstantiation, /// We are instantiating a default argument for a template /// parameter. The Entity is the template parameter whose argument is /// being instantiated, the Template is the template, and the /// TemplateArgs/NumTemplateArguments provide the template arguments as /// specified. 
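/// For example, the default argument 'U = T*' below is instantiated
/// separately for each use:
/// \code
/// template<typename T, typename U = T*> struct X { };
/// X<int> x; // instantiates U as 'int*'
/// \endcode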
DefaultTemplateArgumentInstantiation,

/// We are instantiating a default argument for a function.
/// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs
/// provides the template arguments as specified.
DefaultFunctionArgumentInstantiation,

/// We are substituting explicit template arguments provided for
/// a function template. The entity is a FunctionTemplateDecl.
ExplicitTemplateArgumentSubstitution,

/// We are substituting template arguments determined as part of
/// template argument deduction for either a class template
/// partial specialization or a function template. The
/// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or
/// a TemplateDecl.
DeducedTemplateArgumentSubstitution,

/// We are substituting prior template arguments into a new
/// template parameter. The template parameter itself is either a
/// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl.
PriorTemplateArgumentSubstitution,

/// We are checking the validity of a default template argument that
/// has been used when naming a template-id.
DefaultTemplateArgumentChecking,

/// We are computing the exception specification for a defaulted special
/// member function.
ExceptionSpecEvaluation,

/// We are instantiating the exception specification for a function
/// template which was deferred until it was needed.
ExceptionSpecInstantiation,

/// We are instantiating a requirement of a requires expression.
RequirementInstantiation,

/// We are checking the satisfaction of a nested requirement of a requires
/// expression.
NestedRequirementConstraintsCheck,

/// We are declaring an implicit special member function.
DeclaringSpecialMember,

/// We are declaring an implicit 'operator==' for a defaulted
/// 'operator<=>'.
DeclaringImplicitEqualityComparison,

/// We are defining a synthesized function (such as a defaulted special
/// member).
DefiningSynthesizedFunction,

/// We are checking the constraints associated with a constrained entity or
/// the constraint expression of a concept. This includes the checks that
/// atomic constraints have the type 'bool' and that they can be constant
/// evaluated.
ConstraintsCheck,

/// We are substituting template arguments into a constraint expression.
ConstraintSubstitution,

/// We are normalizing a constraint expression.
ConstraintNormalization,

/// We are substituting into the parameter mapping of an atomic constraint
/// during normalization.
ParameterMappingSubstitution,

/// We are rewriting a comparison operator in terms of an operator<=>.
RewritingOperatorAsSpaceship,

/// We are initializing a structured binding.
InitializingStructuredBinding,

/// We are marking a class as __dllexport.
MarkingClassDllexported,

/// Added for template instantiation observation.
/// Memoization means we are _not_ instantiating a template because
/// it is already instantiated (but we entered a context where we
/// would have had to if it was not already instantiated).
Memoization
} Kind;

/// Was the enclosing context a non-instantiation SFINAE context?
bool SavedInNonInstantiationSFINAEContext;

/// The point of instantiation or synthesis within the source code.
SourceLocation PointOfInstantiation;

/// The entity that is being synthesized.
Decl *Entity;

/// The template (or partial specialization) in which we are
/// performing the instantiation, for substitutions of prior template
/// arguments.
NamedDecl *Template;

/// The list of template arguments we are substituting, if they
/// are not part of the entity.
const TemplateArgument *TemplateArgs;

// FIXME: Wrap this union around more members, or perhaps store the
// kind-specific members in the RAII object owning the context.
union {
  /// The number of template arguments in TemplateArgs.
  unsigned NumTemplateArgs;

  /// The special member being declared or defined.
  CXXSpecialMember SpecialMember;
};

ArrayRef<TemplateArgument> template_arguments() const {
  assert(Kind != DeclaringSpecialMember);
  return {TemplateArgs, NumTemplateArgs};
}

/// The template deduction info object associated with the
/// substitution or checking of explicit or deduced template arguments.
sema::TemplateDeductionInfo *DeductionInfo;

/// The source range that covers the construct that caused
/// the instantiation, e.g., the template-id that causes a class
/// template instantiation.
SourceRange InstantiationRange;

CodeSynthesisContext()
    : Kind(TemplateInstantiation),
      SavedInNonInstantiationSFINAEContext(false), Entity(nullptr),
      Template(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0),
      DeductionInfo(nullptr) {}

/// Determines whether this template is an actual instantiation
/// that should be counted toward the maximum instantiation depth.
bool isInstantiationRecord() const;
};

/// List of active code synthesis contexts.
///
/// This vector is treated as a stack. As synthesis of one entity requires
/// synthesis of another, additional contexts are pushed onto the stack.
SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts;

/// Specializations whose definitions are currently being instantiated.
llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations;

/// Non-dependent types used in templates that have already been instantiated
/// by some template instantiation.
llvm::DenseSet<QualType> InstantiatedNonDependentTypes;

/// Extra modules inspected when performing a lookup during a template
/// instantiation. Computed lazily.
SmallVector<Module*, 16> CodeSynthesisContextLookupModules;

/// Cache of additional modules that should be used for name lookup
/// within the current template instantiation. Computed lazily; use
/// getLookupModules() to get a complete set.
llvm::DenseSet<Module*> LookupModulesCache;

/// Get the set of additional modules that should be checked during
/// name lookup. A module and its imports become visible when instantiating a
/// template defined within it.
llvm::DenseSet<Module*> &getLookupModules();

/// Map from the most recent declaration of a namespace to the most
/// recent visible declaration of that namespace.
llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache;

/// Whether we are in a SFINAE context that is not associated with
/// template instantiation.
///
/// This is used when setting up a SFINAE trap (see \c SFINAETrap) outside
/// of a template instantiation or template argument deduction.
bool InNonInstantiationSFINAEContext;

/// The number of \p CodeSynthesisContexts that are not template
/// instantiations and, therefore, should not be counted as part of the
/// instantiation depth.
///
/// When the instantiation depth reaches the user-configurable limit
/// \p LangOptions::InstantiationDepth we will abort instantiation.
// FIXME: Should we have a similar limit for other forms of synthesis?
unsigned NonInstantiationEntries;

/// The depth of the context stack at the point when the most recent
/// error or warning was produced.
///
/// This value is used to suppress printing of redundant context stacks
/// when there are multiple errors or warnings in the same instantiation.
// FIXME: Does this belong in Sema? It's tough to implement it anywhere else.
unsigned LastEmittedCodeSynthesisContextDepth = 0;

/// The template instantiation callbacks to trace or track
/// instantiations (objects can be chained).
///
/// These callbacks are used to print, trace or track template
/// instantiations as they are being constructed.
std::vector<std::unique_ptr<TemplateInstantiationCallback>>
    TemplateInstCallbacks;

/// The current index into pack expansion arguments that will be
/// used for substitution of parameter packs.
///
/// The pack expansion index will be -1 to indicate that parameter packs
/// should be instantiated as themselves. Otherwise, the index specifies
/// which argument within the parameter pack will be used for substitution.
int ArgumentPackSubstitutionIndex;

/// RAII object used to change the argument pack substitution index
/// within a \c Sema object.
///
/// See \c ArgumentPackSubstitutionIndex for more information.
class ArgumentPackSubstitutionIndexRAII {
  Sema &Self;
  int OldSubstitutionIndex;

public:
  ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex)
      : Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) {
    Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex;
  }

  ~ArgumentPackSubstitutionIndexRAII() {
    Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex;
  }
};

friend class ArgumentPackSubstitutionRAII;

/// For each declaration that involved template argument deduction, the
/// set of diagnostics that were suppressed during that template argument
/// deduction.
///
/// FIXME: Serialize this structure to the AST file.
typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> >
    SuppressedDiagnosticsMap;
SuppressedDiagnosticsMap SuppressedDiagnostics;

/// A stack object to be created when performing template
/// instantiation.
///
/// Construction of an object of type \c InstantiatingTemplate
/// pushes the current instantiation onto the stack of active
/// instantiations. If the size of this stack exceeds the maximum
/// number of recursive template instantiations, construction
/// produces an error and \c isInvalid() returns true.
///
/// Destruction of this object will pop the named instantiation off
/// the stack.
struct InstantiatingTemplate {
  /// Note that we are instantiating a class template,
  /// function template, variable template, alias template,
  /// or a member thereof.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        Decl *Entity,
                        SourceRange InstantiationRange = SourceRange());

  struct ExceptionSpecification {};
  /// Note that we are instantiating an exception specification
  /// of a function template.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        FunctionDecl *Entity, ExceptionSpecification,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are instantiating a default argument in a
  /// template-id.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        TemplateParameter Param, TemplateDecl *Template,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are substituting either explicitly-specified or
  /// deduced template arguments during function template argument deduction.
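  /// For example:
  /// \code
  /// template<typename T, typename U> void f(T, U);
  /// f<int>(0, 'x'); // 'int' is explicitly specified; U is deduced as 'char'
  /// \endcode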
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionTemplateDecl *FunctionTemplate, ArrayRef<TemplateArgument> TemplateArgs, CodeSynthesisContext::SynthesisKind Kind, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a class template declaration. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a class template partial /// specialization. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ClassTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a variable template partial /// specialization. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, VarTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating a default argument for a function /// parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ParmVarDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange = SourceRange()); /// Note that we are substituting prior template arguments into a /// non-type parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, NonTypeTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// Note that we are substituting prior template arguments into a /// template template parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, TemplateTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// Note that we are checking the default template argument /// against the template parameter for a given template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, NamedDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); struct ConstraintsCheck {}; /// \brief Note that we are checking the constraints associated with some /// constrained entity (a concept declaration or a template with associated /// constraints). InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ConstraintsCheck, NamedDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); struct ConstraintSubstitution {}; /// \brief Note that we are checking a constraint expression associated /// with a template declaration or as part of the satisfaction check of a /// concept. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ConstraintSubstitution, NamedDecl *Template, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange); struct ConstraintNormalization {}; /// \brief Note that we are normalizing a constraint expression. 
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                      ConstraintNormalization, NamedDecl *Template,
                      SourceRange InstantiationRange);

struct ParameterMappingSubstitution {};
/// \brief Note that we are substituting into the parameter mapping of an
/// atomic constraint during constraint normalization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                      ParameterMappingSubstitution, NamedDecl *Template,
                      SourceRange InstantiationRange);

/// \brief Note that we are substituting template arguments into a part of
/// a requirement of a requires expression.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                      concepts::Requirement *Req,
                      sema::TemplateDeductionInfo &DeductionInfo,
                      SourceRange InstantiationRange = SourceRange());

/// \brief Note that we are checking the satisfaction of the constraint
/// expression inside of a nested requirement.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                      concepts::NestedRequirement *Req, ConstraintsCheck,
                      SourceRange InstantiationRange = SourceRange());

/// Note that we have finished instantiating this template.
void Clear();

~InstantiatingTemplate() { Clear(); }

/// Determines whether we have exceeded the maximum number of
/// recursive template instantiations.
bool isInvalid() const { return Invalid; }

/// Determine whether we are already instantiating this
/// specialization in some surrounding active instantiation.
bool isAlreadyInstantiating() const { return AlreadyInstantiating; }

private:
Sema &SemaRef;
bool Invalid;
bool AlreadyInstantiating;
bool CheckInstantiationDepth(SourceLocation PointOfInstantiation,
                             SourceRange InstantiationRange);

InstantiatingTemplate(Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind,
                      SourceLocation PointOfInstantiation,
                      SourceRange InstantiationRange, Decl *Entity,
                      NamedDecl *Template = nullptr,
                      ArrayRef<TemplateArgument> TemplateArgs = None,
                      sema::TemplateDeductionInfo *DeductionInfo = nullptr);

InstantiatingTemplate(const InstantiatingTemplate&) = delete;
InstantiatingTemplate& operator=(const InstantiatingTemplate&) = delete;
};

void pushCodeSynthesisContext(CodeSynthesisContext Ctx);
void popCodeSynthesisContext();

/// Determine whether we are currently performing template instantiation.
bool inTemplateInstantiation() const {
  return CodeSynthesisContexts.size() > NonInstantiationEntries;
}

void PrintContextStack() {
  if (!CodeSynthesisContexts.empty() &&
      CodeSynthesisContexts.size() != LastEmittedCodeSynthesisContextDepth) {
    PrintInstantiationStack();
    LastEmittedCodeSynthesisContextDepth = CodeSynthesisContexts.size();
  }
  if (PragmaAttributeCurrentTargetDecl)
    PrintPragmaAttributeInstantiationPoint();
}
void PrintInstantiationStack();
void PrintPragmaAttributeInstantiationPoint();

/// Determines whether we are currently in a context where
/// template argument substitution failures are not considered
/// errors.
///
/// \returns An empty \c Optional if we're not in a SFINAE context.
/// Otherwise, contains a pointer that, if non-NULL, contains the nearest
/// template-deduction context object, which can be used to capture
/// diagnostics that will be suppressed.
Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const;

/// Determines whether we are currently in a context that
/// is not evaluated as per C++ [expr] p5.
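/// For example, the operand of \c sizeof is such a context:
/// \code
/// int g();
/// unsigned n = sizeof(g()); // 'g()' is never evaluated
/// \endcode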
bool isUnevaluatedContext() const { assert(!ExprEvalContexts.empty() && "Must be in an expression evaluation context"); return ExprEvalContexts.back().isUnevaluated(); } bool isImmediateFunctionContext() const { assert(!ExprEvalContexts.empty() && "Must be in an expression evaluation context"); for (const ExpressionEvaluationContextRecord &context : llvm::reverse(ExprEvalContexts)) { if (context.isImmediateFunctionContext()) return true; if (context.isUnevaluated()) return false; } return false; } /// RAII class used to determine whether SFINAE has /// trapped any errors that occur during template argument /// deduction. class SFINAETrap { Sema &SemaRef; unsigned PrevSFINAEErrors; bool PrevInNonInstantiationSFINAEContext; bool PrevAccessCheckingSFINAE; bool PrevLastDiagnosticIgnored; public: explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false) : SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors), PrevInNonInstantiationSFINAEContext( SemaRef.InNonInstantiationSFINAEContext), PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE), PrevLastDiagnosticIgnored( SemaRef.getDiagnostics().isLastDiagnosticIgnored()) { if (!SemaRef.isSFINAEContext()) SemaRef.InNonInstantiationSFINAEContext = true; SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE; } ~SFINAETrap() { SemaRef.NumSFINAEErrors = PrevSFINAEErrors; SemaRef.InNonInstantiationSFINAEContext = PrevInNonInstantiationSFINAEContext; SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE; SemaRef.getDiagnostics().setLastDiagnosticIgnored( PrevLastDiagnosticIgnored); } /// Determine whether any SFINAE errors have been trapped. bool hasErrorOccurred() const { return SemaRef.NumSFINAEErrors > PrevSFINAEErrors; } }; /// RAII class used to indicate that we are performing provisional /// semantic analysis to determine the validity of a construct, so /// typo-correction and diagnostics in the immediate context (not within /// implicitly-instantiated templates) should be suppressed. class TentativeAnalysisScope { Sema &SemaRef; // FIXME: Using a SFINAETrap for this is a hack. SFINAETrap Trap; bool PrevDisableTypoCorrection; public: explicit TentativeAnalysisScope(Sema &SemaRef) : SemaRef(SemaRef), Trap(SemaRef, true), PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) { SemaRef.DisableTypoCorrection = true; } ~TentativeAnalysisScope() { SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection; } }; /// The current instantiation scope used to store local /// variables. LocalInstantiationScope *CurrentInstantiationScope; /// Tracks whether we are in a context where typo correction is /// disabled. bool DisableTypoCorrection; /// The number of typos corrected by CorrectTypo. unsigned TyposCorrected; typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet; typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations; /// A cache containing identifiers for which typo correction failed and /// their locations, so that repeated attempts to correct an identifier in a /// given location are ignored if typo correction already failed for it. IdentifierSourceLocations TypoCorrectionFailures; /// Worker object for performing CFG-based warnings. sema::AnalysisBasedWarnings AnalysisWarnings; threadSafety::BeforeSet *ThreadSafetyDeclCache; /// An entity for which implicit template instantiation is required. /// /// The source location associated with the declaration is the first place in /// the source code where the declaration was "used". 
It is not necessarily /// the point of instantiation (which will be either before or after the /// namespace-scope declaration that triggered this implicit instantiation). /// However, it is the location that diagnostics should generally refer to, /// because users will need to know what code triggered the instantiation. typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation; /// The queue of implicit template instantiations that are required /// but have not yet been performed. std::deque<PendingImplicitInstantiation> PendingInstantiations; /// Queue of implicit template instantiations that cannot be performed /// eagerly. SmallVector<PendingImplicitInstantiation, 1> LateParsedInstantiations; class GlobalEagerInstantiationScope { public: GlobalEagerInstantiationScope(Sema &S, bool Enabled) : S(S), Enabled(Enabled) { if (!Enabled) return; SavedPendingInstantiations.swap(S.PendingInstantiations); SavedVTableUses.swap(S.VTableUses); } void perform() { if (Enabled) { S.DefineUsedVTables(); S.PerformPendingInstantiations(); } } ~GlobalEagerInstantiationScope() { if (!Enabled) return; // Restore the set of pending vtables. assert(S.VTableUses.empty() && "VTableUses should be empty before it is discarded."); S.VTableUses.swap(SavedVTableUses); // Restore the set of pending implicit instantiations. if (S.TUKind != TU_Prefix || !S.LangOpts.PCHInstantiateTemplates) { assert(S.PendingInstantiations.empty() && "PendingInstantiations should be empty before it is discarded."); S.PendingInstantiations.swap(SavedPendingInstantiations); } else { // Template instantiations in the PCH may be delayed until the TU. S.PendingInstantiations.swap(SavedPendingInstantiations); S.PendingInstantiations.insert(S.PendingInstantiations.end(), SavedPendingInstantiations.begin(), SavedPendingInstantiations.end()); } } private: Sema &S; SmallVector<VTableUse, 16> SavedVTableUses; std::deque<PendingImplicitInstantiation> SavedPendingInstantiations; bool Enabled; }; /// The queue of implicit template instantiations that are required /// and must be performed within the current local scope. /// /// This queue is only used for member functions of local classes in /// templates, which must be instantiated in the same scope as their /// enclosing function, so that they can reference function-local /// types, static variables, enumerators, etc. std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations; class LocalEagerInstantiationScope { public: LocalEagerInstantiationScope(Sema &S) : S(S) { SavedPendingLocalImplicitInstantiations.swap( S.PendingLocalImplicitInstantiations); } void perform() { S.PerformPendingInstantiations(/*LocalOnly=*/true); } ~LocalEagerInstantiationScope() { assert(S.PendingLocalImplicitInstantiations.empty() && "there shouldn't be any pending local implicit instantiations"); SavedPendingLocalImplicitInstantiations.swap( S.PendingLocalImplicitInstantiations); } private: Sema &S; std::deque<PendingImplicitInstantiation> SavedPendingLocalImplicitInstantiations; }; /// A helper class for building up ExtParameterInfos.
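///
/// A minimal usage sketch (illustrative only; \c Builder, \c EPI, and
/// \c NumParams are hypothetical names):
/// \code
///   Sema::ExtParameterInfoBuilder Builder;
///   Builder.set(0, FunctionProtoType::ExtParameterInfo());
///   // Later, when filling in a FunctionProtoType::ExtProtoInfo EPI:
///   EPI.ExtParameterInfos = Builder.getPointerOrNull(NumParams);
/// \endcode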
class ExtParameterInfoBuilder { SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos; bool HasInteresting = false; public: /// Set the ExtParameterInfo for the parameter at the given index. /// void set(unsigned index, FunctionProtoType::ExtParameterInfo info) { assert(Infos.size() <= index); Infos.resize(index); Infos.push_back(info); if (!HasInteresting) HasInteresting = (info != FunctionProtoType::ExtParameterInfo()); } /// Return a pointer (suitable for setting in an ExtProtoInfo) to the /// ExtParameterInfo array we've built up. const FunctionProtoType::ExtParameterInfo * getPointerOrNull(unsigned numParams) { if (!HasInteresting) return nullptr; Infos.resize(numParams); return Infos.data(); } }; void PerformPendingInstantiations(bool LocalOnly = false); TypeSourceInfo *SubstType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity, bool AllowDeducedTST = false); QualType SubstType(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstType(TypeLoc TL, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity, CXXRecordDecl *ThisContext, Qualifiers ThisTypeQuals); void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto, const MultiLevelTemplateArgumentList &Args); bool SubstExceptionSpec(SourceLocation Loc, FunctionProtoType::ExceptionSpecInfo &ESI, SmallVectorImpl<QualType> &ExceptionStorage, const MultiLevelTemplateArgumentList &Args); ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs, int indexAdjustment, Optional<unsigned> NumExpansions, bool ExpectParameterPack); bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params, const FunctionProtoType::ExtParameterInfo *ExtParamInfos, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<QualType> &ParamTypes, SmallVectorImpl<ParmVarDecl *> *OutParams, ExtParameterInfoBuilder &ParamInfos); ExprResult SubstExpr(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs); /// Substitute the given template arguments into a list of /// expressions, expanding pack expansions if required. /// /// \param Exprs The list of expressions to substitute into. /// /// \param IsCall Whether this is some form of call, in which case /// default arguments will be dropped. /// /// \param TemplateArgs The set of template arguments to substitute. /// /// \param Outputs Will receive all of the substituted arguments. /// /// \returns true if an error occurred, false otherwise. bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<Expr *> &Outputs); StmtResult SubstStmt(Stmt *S, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateParameterList * SubstTemplateParams(TemplateParameterList *Params, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); bool SubstTemplateArguments(ArrayRef<TemplateArgumentLoc> Args, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateArgumentListInfo &Outputs); Decl *SubstDecl(Decl *D, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); /// Substitute the name and return type of a defaulted 'operator<=>' to form /// an implicit 'operator=='.
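///
/// For example (C++20), a defaulted three-way comparison also implies a
/// defaulted equality comparison:
/// \code
///   struct S {
///     auto operator<=>(const S &) const = default; // also implies operator==
///   };
/// \endcode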
FunctionDecl *SubstSpaceshipAsEqualEqual(CXXRecordDecl *RD, FunctionDecl *Spaceship); ExprResult SubstInitializer(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs, bool CXXDirectInit); bool SubstBaseSpecifiers(CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); bool InstantiateClass(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK, bool Complain = true); bool InstantiateEnum(SourceLocation PointOfInstantiation, EnumDecl *Instantiation, EnumDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); bool InstantiateInClassInitializer( SourceLocation PointOfInstantiation, FieldDecl *Instantiation, FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); struct LateInstantiatedAttribute { const Attr *TmplAttr; LocalInstantiationScope *Scope; Decl *NewDecl; LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S, Decl *D) : TmplAttr(A), Scope(S), NewDecl(D) { } }; typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec; void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); void InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); void InstantiateDefaultCtorDefaultArgs(CXXConstructorDecl *Ctor); bool usesPartialOrExplicitSpecialization( SourceLocation Loc, ClassTemplateSpecializationDecl *ClassTemplateSpec); bool InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK, bool Complain = true); void InstantiateClassMembers(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); void InstantiateClassTemplateSpecializationMembers( SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK); NestedNameSpecifierLoc SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS, const MultiLevelTemplateArgumentList &TemplateArgs); DeclarationNameInfo SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateName SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name, SourceLocation Loc, const MultiLevelTemplateArgumentList &TemplateArgs); bool SubstTypeConstraint(TemplateTypeParmDecl *Inst, const TypeConstraint *TC, const MultiLevelTemplateArgumentList &TemplateArgs); bool InstantiateDefaultArgument(SourceLocation CallLoc, FunctionDecl *FD, ParmVarDecl *Param); void InstantiateExceptionSpec(SourceLocation PointOfInstantiation, FunctionDecl *Function); bool CheckInstantiatedFunctionTemplateConstraints( SourceLocation PointOfInstantiation, FunctionDecl *Decl, ArrayRef<TemplateArgument> TemplateArgs, ConstraintSatisfaction &Satisfaction); FunctionDecl *InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD, const TemplateArgumentList *Args, SourceLocation Loc); void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation, FunctionDecl *Function, bool Recursive = false, bool DefinitionRequired 
= false, bool AtEndOfTU = false); VarTemplateSpecializationDecl *BuildVarTemplateInstantiation( VarTemplateDecl *VarTemplate, VarDecl *FromVar, const TemplateArgumentList &TemplateArgList, const TemplateArgumentListInfo &TemplateArgsInfo, SmallVectorImpl<TemplateArgument> &Converted, SourceLocation PointOfInstantiation, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *StartingScope = nullptr); VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl( VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl, const MultiLevelTemplateArgumentList &TemplateArgs); void BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs, LateInstantiatedAttrVec *LateAttrs, DeclContext *Owner, LocalInstantiationScope *StartingScope, bool InstantiatingVarTemplate = false, VarTemplateSpecializationDecl *PrevVTSD = nullptr); void InstantiateVariableInitializer( VarDecl *Var, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs); void InstantiateVariableDefinition(SourceLocation PointOfInstantiation, VarDecl *Var, bool Recursive = false, bool DefinitionRequired = false, bool AtEndOfTU = false); void InstantiateMemInitializers(CXXConstructorDecl *New, const CXXConstructorDecl *Tmpl, const MultiLevelTemplateArgumentList &TemplateArgs); NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs, bool FindingInstantiatedContext = false); DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC, const MultiLevelTemplateArgumentList &TemplateArgs); // Objective-C declarations. enum ObjCContainerKind { OCK_None = -1, OCK_Interface = 0, OCK_Protocol, OCK_Category, OCK_ClassExtension, OCK_Implementation, OCK_CategoryImplementation }; ObjCContainerKind getObjCContainerKind() const; DeclResult actOnObjCTypeParam(Scope *S, ObjCTypeParamVariance variance, SourceLocation varianceLoc, unsigned index, IdentifierInfo *paramName, SourceLocation paramLoc, SourceLocation colonLoc, ParsedType typeBound); ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc, ArrayRef<Decl *> typeParams, SourceLocation rAngleLoc); void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList); Decl *ActOnStartClassInterface( Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, ObjCTypeParamList *typeParamList, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange, Decl *const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); void ActOnSuperClassOfClassInterface(Scope *S, SourceLocation AtInterfaceLoc, ObjCInterfaceDecl *IDecl, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange); void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs, SmallVectorImpl<SourceLocation> &ProtocolLocs, IdentifierInfo *SuperName, SourceLocation SuperLoc); Decl *ActOnCompatibilityAlias( SourceLocation AtCompatibilityAliasLoc, IdentifierInfo *AliasName, SourceLocation AliasLocation, IdentifierInfo *ClassName, SourceLocation ClassLocation); bool CheckForwardProtocolDeclarationForCircularDependency( IdentifierInfo *PName, SourceLocation &PLoc, SourceLocation PrevLoc, const ObjCList<ObjCProtocolDecl> &PList); Decl *ActOnStartProtocolInterface( 
SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName, SourceLocation ProtocolLoc, Decl *const *ProtoRefNames, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); Decl *ActOnStartCategoryInterface( SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, ObjCTypeParamList *typeParamList, IdentifierInfo *CategoryName, SourceLocation CategoryLoc, Decl *const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); Decl *ActOnStartClassImplementation(SourceLocation AtClassImplLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *SuperClassname, SourceLocation SuperClassLoc, const ParsedAttributesView &AttrList); Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *CatName, SourceLocation CatLoc, const ParsedAttributesView &AttrList); DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl, ArrayRef<Decl *> Decls); DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc, IdentifierInfo **IdentList, SourceLocation *IdentLocs, ArrayRef<ObjCTypeParamList *> TypeParamLists, unsigned NumElts); DeclGroupPtrTy ActOnForwardProtocolDeclaration(SourceLocation AtProtocolLoc, ArrayRef<IdentifierLocPair> IdentList, const ParsedAttributesView &attrList); void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer, ArrayRef<IdentifierLocPair> ProtocolId, SmallVectorImpl<Decl *> &Protocols); void DiagnoseTypeArgsAndProtocols(IdentifierInfo *ProtocolId, SourceLocation ProtocolLoc, IdentifierInfo *TypeArgId, SourceLocation TypeArgLoc, bool SelectProtocolFirst = false); /// Given a list of identifiers (and their locations), resolve the /// names to either Objective-C protocol qualifiers or type /// arguments, as appropriate. void actOnObjCTypeArgsOrProtocolQualifiers( Scope *S, ParsedType baseType, SourceLocation lAngleLoc, ArrayRef<IdentifierInfo *> identifiers, ArrayRef<SourceLocation> identifierLocs, SourceLocation rAngleLoc, SourceLocation &typeArgsLAngleLoc, SmallVectorImpl<ParsedType> &typeArgs, SourceLocation &typeArgsRAngleLoc, SourceLocation &protocolLAngleLoc, SmallVectorImpl<Decl *> &protocols, SourceLocation &protocolRAngleLoc, bool warnOnIncompleteProtocols); /// Build an Objective-C protocol-qualified 'id' type where no /// base type was specified. TypeResult actOnObjCProtocolQualifierType( SourceLocation lAngleLoc, ArrayRef<Decl *> protocols, ArrayRef<SourceLocation> protocolLocs, SourceLocation rAngleLoc); /// Build a specialized and/or protocol-qualified Objective-C type. TypeResult actOnObjCTypeArgsAndProtocolQualifiers( Scope *S, SourceLocation Loc, ParsedType BaseType, SourceLocation TypeArgsLAngleLoc, ArrayRef<ParsedType> TypeArgs, SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc, ArrayRef<Decl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc); /// Build an Objective-C type parameter type. QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl, SourceLocation ProtocolLAngleLoc, ArrayRef<ObjCProtocolDecl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc, bool FailOnError = false); /// Build an Objective-C object pointer type.
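///
/// For example, this handles specialized and protocol-qualified types such
/// as (illustrative Objective-C):
/// \code
///   NSArray<NSString *> *strings;       // type arguments
///   id<NSCopying, NSCoding> archivable; // protocol qualifiers
/// \endcode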
QualType BuildObjCObjectType(QualType BaseType, SourceLocation Loc, SourceLocation TypeArgsLAngleLoc, ArrayRef<TypeSourceInfo *> TypeArgs, SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc, ArrayRef<ObjCProtocolDecl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc, bool FailOnError = false); /// Ensure attributes are consistent with type. /// \param [in, out] Attributes The attributes to check; they will /// be modified to be consistent with \p PropertyTy. void CheckObjCPropertyAttributes(Decl *PropertyPtrTy, SourceLocation Loc, unsigned &Attributes, bool propertyInPrimaryClass); /// Process the specified property declaration and create decls for the /// setters and getters as needed. /// \param property The property declaration being processed void ProcessPropertyDecl(ObjCPropertyDecl *property); void DiagnosePropertyMismatch(ObjCPropertyDecl *Property, ObjCPropertyDecl *SuperProperty, const IdentifierInfo *Name, bool OverridingProtocolProperty); void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT, ObjCInterfaceDecl *ID); Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd, ArrayRef<Decl *> allMethods = None, ArrayRef<DeclGroupPtrTy> allTUVars = None); Decl *ActOnProperty(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, ObjCDeclSpec &ODS, Selector GetterSel, Selector SetterSel, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC = nullptr); Decl *ActOnPropertyImplDecl(Scope *S, SourceLocation AtLoc, SourceLocation PropertyLoc, bool ImplKind, IdentifierInfo *PropertyId, IdentifierInfo *PropertyIvar, SourceLocation PropertyIvarLoc, ObjCPropertyQueryKind QueryKind); enum ObjCSpecialMethodKind { OSMK_None, OSMK_Alloc, OSMK_New, OSMK_Copy, OSMK_RetainingInit, OSMK_NonRetainingInit }; struct ObjCArgInfo { IdentifierInfo *Name; SourceLocation NameLoc; // The Type is null if no type was specified, and the DeclSpec is invalid // in this case. ParsedType Type; ObjCDeclSpec DeclSpec; /// ArgAttrs - Attribute list for this argument. ParsedAttributesView ArgAttrs; }; Decl *ActOnMethodDeclaration( Scope *S, SourceLocation BeginLoc, // location of the + or -. SourceLocation EndLoc, // location of the ; or {. tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType, ArrayRef<SourceLocation> SelectorLocs, Selector Sel, // optional arguments. The number of types/arguments is obtained // from the Sel.getNumArgs(). ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo, unsigned CNumArgs, // c-style args const ParsedAttributesView &AttrList, tok::ObjCKeywordKind MethodImplKind, bool isVariadic, bool MethodDefinition); ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel, const ObjCObjectPointerType *OPT, bool IsInstance); ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty, bool IsInstance); bool CheckARCMethodDecl(ObjCMethodDecl *method); bool inferObjCARCLifetime(ValueDecl *decl); void deduceOpenCLAddressSpace(ValueDecl *decl); ExprResult HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT, Expr *BaseExpr, SourceLocation OpLoc, DeclarationName MemberName, SourceLocation MemberLoc, SourceLocation SuperLoc, QualType SuperType, bool Super); ExprResult ActOnClassPropertyRefExpr(IdentifierInfo &receiverName, IdentifierInfo &propertyName, SourceLocation receiverNameLoc, SourceLocation propertyNameLoc); ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc); /// Describes the kind of message expression indicated by a message /// send that starts with an identifier. 
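///
/// For example (illustrative receivers):
/// \code
///   [super init];      // ObjCSuperMessage
///   [myObject copy];   // ObjCInstanceMessage
///   [NSArray array];   // ObjCClassMessage
/// \endcode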
enum ObjCMessageKind { /// The message is sent to 'super'. ObjCSuperMessage, /// The message is an instance message. ObjCInstanceMessage, /// The message is a class message, and the identifier is a type /// name. ObjCClassMessage }; ObjCMessageKind getObjCMessageKind(Scope *S, IdentifierInfo *Name, SourceLocation NameLoc, bool IsSuper, bool HasTrailingDot, ParsedType &ReceiverType); ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildClassMessageImplicit(QualType ReceiverType, bool isSuperReceiver, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnClassMessage(Scope *S, ParsedType Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildInstanceMessage(Expr *Receiver, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildInstanceMessageImplicit(Expr *Receiver, QualType ReceiverType, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnInstanceMessage(Scope *S, Expr *Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, TypeSourceInfo *TSInfo, Expr *SubExpr); ExprResult ActOnObjCBridgedCast(Scope *S, SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, ParsedType Type, SourceLocation RParenLoc, Expr *SubExpr); void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr); void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr); bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr, CastKind &Kind); bool checkObjCBridgeRelatedComponents(SourceLocation Loc, QualType DestType, QualType SrcType, ObjCInterfaceDecl *&RelatedClass, ObjCMethodDecl *&ClassMethod, ObjCMethodDecl *&InstanceMethod, TypedefNameDecl *&TDNDecl, bool CfToNs, bool Diagnose = true); bool CheckObjCBridgeRelatedConversions(SourceLocation Loc, QualType DestType, QualType SrcType, Expr *&SrcExpr, bool Diagnose = true); bool CheckConversionToObjCLiteral(QualType DstType, Expr *&SrcExpr, bool Diagnose = true); bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall); /// Check whether the given new method is a valid override of the /// given overridden method, and set any properties that should be inherited. void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod, const ObjCMethodDecl *Overridden); /// Describes the compatibility of a result type with its method. enum ResultTypeCompatibilityKind { RTC_Compatible, RTC_Incompatible, RTC_Unknown }; /// Check whether the declared result type of the given Objective-C /// method declaration is compatible with the method's class. 
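///
/// For example, \c init methods conventionally return a related result
/// type, which is compatible with any receiving class:
/// \code
///   - (instancetype)init; // RTC_Compatible
/// \endcode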
ResultTypeCompatibilityKind checkRelatedResultTypeCompatibility(const ObjCMethodDecl *Method, const ObjCInterfaceDecl *CurrentClass); void CheckObjCMethodDirectOverrides(ObjCMethodDecl *method, ObjCMethodDecl *overridden); void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod, ObjCInterfaceDecl *CurrentClass, ResultTypeCompatibilityKind RTC); enum PragmaOptionsAlignKind { POAK_Native, // #pragma options align=native POAK_Natural, // #pragma options align=natural POAK_Packed, // #pragma options align=packed POAK_Power, // #pragma options align=power POAK_Mac68k, // #pragma options align=mac68k POAK_Reset // #pragma options align=reset }; /// ActOnPragmaClangSection - Called on well formed \#pragma clang section void ActOnPragmaClangSection(SourceLocation PragmaLoc, PragmaClangSectionAction Action, PragmaClangSectionKind SecKind, StringRef SecName); /// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align. void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind, SourceLocation PragmaLoc); /// ActOnPragmaPack - Called on well formed \#pragma pack(...). void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action, StringRef SlotLabel, Expr *Alignment); enum class PragmaAlignPackDiagnoseKind { NonDefaultStateAtInclude, ChangedStateAtExit }; void DiagnoseNonDefaultPragmaAlignPack(PragmaAlignPackDiagnoseKind Kind, SourceLocation IncludeLoc); void DiagnoseUnterminatedPragmaAlignPack(); /// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off]. void ActOnPragmaMSStruct(PragmaMSStructKind Kind); /// ActOnPragmaMSComment - Called on well formed /// \#pragma comment(kind, "arg"). void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind, StringRef Arg); /// ActOnPragmaMSPointersToMembers - called on well formed \#pragma /// pointers_to_members(representation method[, general purpose /// representation]). void ActOnPragmaMSPointersToMembers( LangOptions::PragmaMSPointersToMembersKind Kind, SourceLocation PragmaLoc); /// Called on well formed \#pragma vtordisp(). void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action, SourceLocation PragmaLoc, MSVtorDispMode Value); enum PragmaSectionKind { PSK_DataSeg, PSK_BSSSeg, PSK_ConstSeg, PSK_CodeSeg, }; bool UnifySection(StringRef SectionName, int SectionFlags, NamedDecl *TheDecl); bool UnifySection(StringRef SectionName, int SectionFlags, SourceLocation PragmaSectionLocation); /// Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg. void ActOnPragmaMSSeg(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, StringLiteral *SegmentName, llvm::StringRef PragmaName); /// Called on well formed \#pragma section(). void ActOnPragmaMSSection(SourceLocation PragmaLocation, int SectionFlags, StringLiteral *SegmentName); /// Called on well-formed \#pragma init_seg(). void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation, StringLiteral *SegmentName); /// Called on #pragma clang __debug dump II void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II); /// ActOnPragmaDetectMismatch - Call on well-formed \#pragma detect_mismatch void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name, StringRef Value); /// Are precise floating point semantics currently enabled? 
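///
/// For example, precise semantics are lost once reassociation is allowed
/// (one illustrative way to allow it):
/// \code
///   #pragma clang fp reassociate(on)
/// \endcode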
bool isPreciseFPEnabled() { return !CurFPFeatures.getAllowFPReassociate() && !CurFPFeatures.getNoSignedZero() && !CurFPFeatures.getAllowReciprocal() && !CurFPFeatures.getAllowApproxFunc(); } /// ActOnPragmaFloatControl - Called on well-formed \#pragma float_control void ActOnPragmaFloatControl(SourceLocation Loc, PragmaMsStackAction Action, PragmaFloatControlKind Value); /// ActOnPragmaUnused - Called on well-formed '\#pragma unused'. void ActOnPragmaUnused(const Token &Identifier, Scope *curScope, SourceLocation PragmaLoc); /// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... . void ActOnPragmaVisibility(const IdentifierInfo* VisType, SourceLocation PragmaLoc); NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II, SourceLocation Loc); void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W); /// ActOnPragmaWeakID - Called on well formed \#pragma weak ident. void ActOnPragmaWeakID(IdentifierInfo* WeakName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc); /// ActOnPragmaRedefineExtname - Called on well formed /// \#pragma redefine_extname oldname newname. void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName, IdentifierInfo* AliasName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc, SourceLocation AliasNameLoc); /// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident. void ActOnPragmaWeakAlias(IdentifierInfo* WeakName, IdentifierInfo* AliasName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc, SourceLocation AliasNameLoc); /// ActOnPragmaFPContract - Called on well formed /// \#pragma {STDC,OPENCL} FP_CONTRACT and /// \#pragma clang fp contract void ActOnPragmaFPContract(SourceLocation Loc, LangOptions::FPModeKind FPC); /// Called on well formed /// \#pragma clang fp reassociate void ActOnPragmaFPReassociate(SourceLocation Loc, bool IsEnabled); /// ActOnPragmaFEnvAccess - Called on well formed /// \#pragma STDC FENV_ACCESS void ActOnPragmaFEnvAccess(SourceLocation Loc, bool IsEnabled); /// Called on well formed '\#pragma clang fp' that has option 'exceptions'. void ActOnPragmaFPExceptions(SourceLocation Loc, LangOptions::FPExceptionModeKind); /// Called to set constant rounding mode for floating point operations. void setRoundingMode(SourceLocation Loc, llvm::RoundingMode); /// Called to set exception behavior for floating point operations. void setExceptionMode(SourceLocation Loc, LangOptions::FPExceptionModeKind); /// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to /// the record decl, to handle '\#pragma pack' and '\#pragma options align'. void AddAlignmentAttributesForRecord(RecordDecl *RD); /// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record. void AddMsStructLayoutForRecord(RecordDecl *RD); /// PushNamespaceVisibilityAttr - Note that we've entered a /// namespace with a visibility attribute. void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr, SourceLocation Loc); /// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used, /// add an appropriate visibility attribute. void AddPushedVisibilityAttribute(Decl *RD); /// PopPragmaVisibility - Pop the top element of the visibility stack; used /// for '\#pragma GCC visibility' and visibility attributes on namespaces. void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc); /// FreeVisContext - Deallocate and null out VisContext.
void FreeVisContext(); /// AddCFAuditedAttribute - Check whether we're currently within /// '\#pragma clang arc_cf_code_audited' and, if so, consider adding /// the appropriate attribute. void AddCFAuditedAttribute(Decl *D); void ActOnPragmaAttributeAttribute(ParsedAttr &Attribute, SourceLocation PragmaLoc, attr::ParsedSubjectMatchRuleSet Rules); void ActOnPragmaAttributeEmptyPush(SourceLocation PragmaLoc, const IdentifierInfo *Namespace); /// Called on well-formed '\#pragma clang attribute pop'. void ActOnPragmaAttributePop(SourceLocation PragmaLoc, const IdentifierInfo *Namespace); /// Adds the attributes that have been specified using the /// '\#pragma clang attribute push' directives to the given declaration. void AddPragmaAttributes(Scope *S, Decl *D); void DiagnoseUnterminatedPragmaAttribute(); /// Called on well formed \#pragma clang optimize. void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc); /// Get the location for the currently active "\#pragma clang optimize /// off". If this location is invalid, then the state of the pragma is "on". SourceLocation getOptimizeOffPragmaLocation() const { return OptimizeOffPragmaLocation; } /// Only called on function definitions; if there is a pragma in scope /// with the effect of a range-based optnone, consider marking the function /// with attribute optnone. void AddRangeBasedOptnone(FunctionDecl *FD); /// Adds the 'optnone' attribute to the function declaration if there /// are no conflicts; Loc represents the location causing the 'optnone' /// attribute to be added (usually because of a pragma). void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc); /// AddAlignedAttr - Adds an aligned attribute to a particular declaration. void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E, bool IsPackExpansion); void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, TypeSourceInfo *T, bool IsPackExpansion); /// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular /// declaration. void AddAssumeAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E, Expr *OE); /// AddAllocAlignAttr - Adds an alloc_align attribute to a particular /// declaration. void AddAllocAlignAttr(Decl *D, const AttributeCommonInfo &CI, Expr *ParamExpr); /// AddAlignValueAttr - Adds an align_value attribute to a particular /// declaration. void AddAlignValueAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E); /// AddAnnotationAttr - Adds an annotation Annot with Args arguments to D. void AddAnnotationAttr(Decl *D, const AttributeCommonInfo &CI, StringRef Annot, MutableArrayRef<Expr *> Args); /// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular /// declaration. void AddLaunchBoundsAttr(Decl *D, const AttributeCommonInfo &CI, Expr *MaxThreads, Expr *MinBlocks); /// AddModeAttr - Adds a mode attribute to a particular declaration. void AddModeAttr(Decl *D, const AttributeCommonInfo &CI, IdentifierInfo *Name, bool InInstantiation = false); void AddParameterABIAttr(Decl *D, const AttributeCommonInfo &CI, ParameterABI ABI); enum class RetainOwnershipKind {NS, CF, OS}; void AddXConsumedAttr(Decl *D, const AttributeCommonInfo &CI, RetainOwnershipKind K, bool IsTemplateInstantiation); /// addAMDGPUFlatWorkGroupSizeAttr - Adds an amdgpu_flat_work_group_size /// attribute to a particular declaration. void addAMDGPUFlatWorkGroupSizeAttr(Decl *D, const AttributeCommonInfo &CI, Expr *Min, Expr *Max); /// addAMDGPUWavesPerEUAttr - Adds an amdgpu_waves_per_eu attribute to a /// particular declaration.
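///
/// The corresponding source-level attribute looks like (illustrative
/// OpenCL):
/// \code
///   kernel void k() __attribute__((amdgpu_waves_per_eu(2, 4)));
/// \endcode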
void addAMDGPUWavesPerEUAttr(Decl *D, const AttributeCommonInfo &CI, Expr *Min, Expr *Max); bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type); //===--------------------------------------------------------------------===// // C++ Coroutines TS // bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc, StringRef Keyword); ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E); ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E); StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E); ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E, bool IsImplicit = false); ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E, UnresolvedLookupExpr* Lookup); ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E); StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E, bool IsImplicit = false); StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs); bool buildCoroutineParameterMoves(SourceLocation Loc); VarDecl *buildCoroutinePromise(SourceLocation Loc); void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body); ClassTemplateDecl *lookupCoroutineTraits(SourceLocation KwLoc, SourceLocation FuncLoc); /// Check that the expression co_await promise.final_suspend() shall not be /// potentially-throwing. bool checkFinalSuspendNoThrow(const Stmt *FinalSuspend); //===--------------------------------------------------------------------===// // OpenMP directives and clauses. // private: void *VarDataSharingAttributesStack; struct DeclareTargetContextInfo { struct MapInfo { OMPDeclareTargetDeclAttr::MapTypeTy MT; SourceLocation Loc; }; /// Explicitly listed variables and functions in a 'to' or 'link' clause. llvm::DenseMap<NamedDecl *, MapInfo> ExplicitlyMapped; /// The 'device_type' as parsed from the clause. OMPDeclareTargetDeclAttr::DevTypeTy DT = OMPDeclareTargetDeclAttr::DT_Any; /// The directive kind, `begin declare target` or `declare target`. OpenMPDirectiveKind Kind; /// The directive location. SourceLocation Loc; DeclareTargetContextInfo(OpenMPDirectiveKind Kind, SourceLocation Loc) : Kind(Kind), Loc(Loc) {} }; /// Stack of nested '#pragma omp declare target' directives. SmallVector<DeclareTargetContextInfo, 4> DeclareTargetNesting; /// Initialization of data-sharing attributes stack. void InitDataSharingAttributesStack(); void DestroyDataSharingAttributesStack(); ExprResult VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind, bool StrictlyPositive = true, bool SuppressExprDiags = false); /// Returns OpenMP nesting level for current directive. unsigned getOpenMPNestingLevel() const; /// Adjusts the function scopes index for the target-based regions. void adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex, unsigned Level) const; /// Returns the number of scopes associated with the construct on the given /// OpenMP level. int getNumberOfConstructScopes(unsigned Level) const; /// Push new OpenMP function region for non-capturing function. void pushOpenMPFunctionRegion(); /// Pop OpenMP function region for non-capturing function. void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI); /// Analyzes and checks a loop nest for use by a loop transformation. /// /// \param Kind The loop transformation directive kind. /// \param NumLoops How many nested loops the directive is expecting. /// \param AStmt Associated statement of the transformation directive. /// \param LoopHelpers [out] The loop analysis result.
/// \param Body [out] The body code nested in \p NumLoops loops. /// \param OriginalInits [out] Collection of statements and declarations that /// must have been executed/declared before entering the /// loop. /// /// \return Whether there was any error. bool checkTransformableLoopNest( OpenMPDirectiveKind Kind, Stmt *AStmt, int NumLoops, SmallVectorImpl<OMPLoopBasedDirective::HelperExprs> &LoopHelpers, Stmt *&Body, SmallVectorImpl<SmallVector<llvm::PointerUnion<Stmt *, Decl *>, 0>> &OriginalInits); /// Helper to keep information about the current `omp begin/end declare /// variant` nesting. struct OMPDeclareVariantScope { /// The associated OpenMP context selector. OMPTraitInfo *TI; /// The associated OpenMP context selector mangling. std::string NameSuffix; OMPDeclareVariantScope(OMPTraitInfo &TI); }; /// Return the OMPTraitInfo for the surrounding scope, if any. OMPTraitInfo *getOMPTraitInfoForSurroundingScope() { return OMPDeclareVariantScopes.empty() ? nullptr : OMPDeclareVariantScopes.back().TI; } /// The current `omp begin/end declare variant` scopes. SmallVector<OMPDeclareVariantScope, 4> OMPDeclareVariantScopes; /// The current `omp begin/end assumes` scopes. SmallVector<AssumptionAttr *, 4> OMPAssumeScoped; /// All `omp assumes` we encountered so far. SmallVector<AssumptionAttr *, 4> OMPAssumeGlobal; public: /// The declarator \p D defines a function in the scope \p S which is nested /// in an `omp begin/end declare variant` scope. In this method we create a /// declaration for \p D and rename \p D according to the OpenMP context /// selector of the surrounding scope. Return all base functions in \p Bases. void ActOnStartOfFunctionDefinitionInOpenMPDeclareVariantScope( Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParameterLists, SmallVectorImpl<FunctionDecl *> &Bases); /// Register \p D as specialization of all base functions in \p Bases in the /// current `omp begin/end declare variant` scope. void ActOnFinishedFunctionDefinitionInOpenMPDeclareVariantScope( Decl *D, SmallVectorImpl<FunctionDecl *> &Bases); /// Act on \p D, a function definition inside of an `omp [begin/end] assumes`. void ActOnFinishedFunctionDefinitionInOpenMPAssumeScope(Decl *D); /// Can we exit an OpenMP declare variant scope at the moment? bool isInOpenMPDeclareVariantScope() const { return !OMPDeclareVariantScopes.empty(); } /// Given the potential call expression \p Call, determine if there is a /// specialization via the OpenMP declare variant mechanism available. If /// there is, return the specialized call expression, otherwise return the /// original \p Call. ExprResult ActOnOpenMPCall(ExprResult Call, Scope *Scope, SourceLocation LParenLoc, MultiExprArg ArgExprs, SourceLocation RParenLoc, Expr *ExecConfig); /// Handle an `omp begin declare variant`. void ActOnOpenMPBeginDeclareVariant(SourceLocation Loc, OMPTraitInfo &TI); /// Handle an `omp end declare variant`. void ActOnOpenMPEndDeclareVariant(); /// Checks if the variant/multiversion functions are compatible. bool areMultiversionVariantFunctionsCompatible( const FunctionDecl *OldFD, const FunctionDecl *NewFD, const PartialDiagnostic &NoProtoDiagID, const PartialDiagnosticAt &NoteCausedDiagIDAt, const PartialDiagnosticAt &NoSupportDiagIDAt, const PartialDiagnosticAt &DiffDiagIDAt, bool TemplatesSupported, bool ConstexprSupported, bool CLinkageMayDiffer); /// Tries to capture a lambda's captured variables in the OpenMP region /// before the original lambda is captured.
void tryCaptureOpenMPLambdas(ValueDecl *V); /// Return true if the provided declaration \a D should be captured by /// reference. /// \param Level Relative level of nested OpenMP construct for which the check /// is performed. /// \param OpenMPCaptureLevel Capture level within an OpenMP construct. bool isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level, unsigned OpenMPCaptureLevel) const; /// Check if the specified variable is used in one of the private /// clauses (private, firstprivate, lastprivate, reduction, etc.) in OpenMP /// constructs. VarDecl *isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo = false, unsigned StopAt = 0); ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK, ExprObjectKind OK, SourceLocation Loc); /// If the current region is a loop-based region, mark the start of the loop /// construct. void startOpenMPLoop(); /// If the current region is a range loop-based region, mark the start of the /// loop construct. void startOpenMPCXXRangeFor(); /// Check if the specified variable is used in a 'private' clause. /// \param Level Relative level of nested OpenMP construct for which the check /// is performed. OpenMPClauseKind isOpenMPPrivateDecl(ValueDecl *D, unsigned Level, unsigned CapLevel) const; /// Sets OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map, etc.) /// for \p FD based on DSA for the provided corresponding captured declaration /// \p D. void setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, unsigned Level); /// Check if the specified variable is captured by 'target' directive. /// \param Level Relative level of nested OpenMP construct for which the check /// is performed. bool isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level, unsigned CaptureLevel) const; /// Check if the specified global variable must be captured by outer capture /// regions. /// \param Level Relative level of nested OpenMP construct for which /// the check is performed. bool isOpenMPGlobalCapturedDecl(ValueDecl *D, unsigned Level, unsigned CaptureLevel) const; ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc, Expr *Op); /// Called on start of new data sharing attribute block. void StartOpenMPDSABlock(OpenMPDirectiveKind K, const DeclarationNameInfo &DirName, Scope *CurScope, SourceLocation Loc); /// Start analysis of clauses. void StartOpenMPClause(OpenMPClauseKind K); /// End analysis of clauses. void EndOpenMPClause(); /// Called on end of data sharing attribute block. void EndOpenMPDSABlock(Stmt *CurDirective); /// Check if the current region is an OpenMP loop region and if it is, /// mark the loop control variable used in \p Init for loop initialization as /// private by default. /// \param Init First part of the for loop. void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init); /// Called on well-formed '\#pragma omp metadirective' after parsing /// of the associated statement. StmtResult ActOnOpenMPMetaDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); // OpenMP directives and clauses. /// Called on correct id-expression from the '#pragma omp /// threadprivate'. ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec, const DeclarationNameInfo &Id, OpenMPDirectiveKind Kind); /// Called on well-formed '#pragma omp threadprivate'. DeclGroupPtrTy ActOnOpenMPThreadprivateDirective( SourceLocation Loc, ArrayRef<Expr *> VarList); /// Builds a new OpenMPThreadPrivateDecl and checks its correctness.
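///
/// For example (illustrative):
/// \code
///   static int Counter;
///   #pragma omp threadprivate(Counter)
/// \endcode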
OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc, ArrayRef<Expr *> VarList); /// Called on well-formed '#pragma omp allocate'. DeclGroupPtrTy ActOnOpenMPAllocateDirective(SourceLocation Loc, ArrayRef<Expr *> VarList, ArrayRef<OMPClause *> Clauses, DeclContext *Owner = nullptr); /// Called on well-formed '#pragma omp [begin] assume[s]'. void ActOnOpenMPAssumesDirective(SourceLocation Loc, OpenMPDirectiveKind DKind, ArrayRef<std::string> Assumptions, bool SkippedClauses); /// Check if there is an active global `omp begin assumes` directive. bool isInOpenMPAssumeScope() const { return !OMPAssumeScoped.empty(); } /// Check if there is an active global `omp assumes` directive. bool hasGlobalOpenMPAssumes() const { return !OMPAssumeGlobal.empty(); } /// Called on well-formed '#pragma omp end assumes'. void ActOnOpenMPEndAssumesDirective(); /// Called on well-formed '#pragma omp requires'. DeclGroupPtrTy ActOnOpenMPRequiresDirective(SourceLocation Loc, ArrayRef<OMPClause *> ClauseList); /// Check restrictions on the Requires directive. OMPRequiresDecl *CheckOMPRequiresDecl(SourceLocation Loc, ArrayRef<OMPClause *> Clauses); /// Check if the specified type is allowed to be used in 'omp declare /// reduction' construct. QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc, TypeResult ParsedType); /// Called on start of '#pragma omp declare reduction'. DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart( Scope *S, DeclContext *DC, DeclarationName Name, ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes, AccessSpecifier AS, Decl *PrevDeclInScope = nullptr); /// Initialize the declare reduction construct combiner. void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D); /// Finish the current declare reduction construct combiner. void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner); /// Initialize declare reduction construct initializer. /// \return omp_priv variable. VarDecl *ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D); /// Finish current declare reduction construct initializer. void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer, VarDecl *OmpPrivParm); /// Called at the end of '#pragma omp declare reduction'. DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd( Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid); /// Check variable declaration in 'omp declare mapper' construct. TypeResult ActOnOpenMPDeclareMapperVarDecl(Scope *S, Declarator &D); /// Check if the specified type is allowed to be used in 'omp declare /// mapper' construct. QualType ActOnOpenMPDeclareMapperType(SourceLocation TyLoc, TypeResult ParsedType); /// Called on start of '#pragma omp declare mapper'. DeclGroupPtrTy ActOnOpenMPDeclareMapperDirective( Scope *S, DeclContext *DC, DeclarationName Name, QualType MapperType, SourceLocation StartLoc, DeclarationName VN, AccessSpecifier AS, Expr *MapperVarRef, ArrayRef<OMPClause *> Clauses, Decl *PrevDeclInScope = nullptr); /// Build the mapper variable of '#pragma omp declare mapper'. ExprResult ActOnOpenMPDeclareMapperDirectiveVarDecl(Scope *S, QualType MapperType, SourceLocation StartLoc, DeclarationName VN); bool isOpenMPDeclareMapperVarDeclAllowed(const VarDecl *VD) const; const ValueDecl *getOpenMPDeclareMapperVarName() const; /// Called on the start of a target region, i.e. '#pragma omp declare target'. bool ActOnStartOpenMPDeclareTargetContext(DeclareTargetContextInfo &DTCI); /// Called at the end of a target region, i.e. '#pragma omp end declare target'.
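///
/// For example (illustrative):
/// \code
///   #pragma omp declare target
///   int DeviceGlobal;
///   #pragma omp end declare target
/// \endcode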
const DeclareTargetContextInfo ActOnOpenMPEndDeclareTargetDirective(); /// Called once a target context is completed, which can be when a /// '#pragma omp end declare target' was encountered or when a /// '#pragma omp declare target' without declaration-definition-seq was /// encountered. void ActOnFinishedOpenMPDeclareTargetContext(DeclareTargetContextInfo &DTCI); /// Searches for the provided declaration name for the OpenMP declare target /// directive. NamedDecl *lookupOpenMPDeclareTargetName(Scope *CurScope, CXXScopeSpec &ScopeSpec, const DeclarationNameInfo &Id); /// Called on correct id-expression from the '#pragma omp declare target'. void ActOnOpenMPDeclareTargetName(NamedDecl *ND, SourceLocation Loc, OMPDeclareTargetDeclAttr::MapTypeTy MT, OMPDeclareTargetDeclAttr::DevTypeTy DT); /// Check declaration inside target region. void checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D, SourceLocation IdLoc = SourceLocation()); /// Finishes analysis of the deferred function calls that may be declared as /// host/nohost during device/host compilation. void finalizeOpenMPDelayedAnalysis(const FunctionDecl *Caller, const FunctionDecl *Callee, SourceLocation Loc); /// Return true if inside an OpenMP declare target region. bool isInOpenMPDeclareTargetContext() const { return !DeclareTargetNesting.empty(); } /// Return true if inside an OpenMP target region. bool isInOpenMPTargetExecutionDirective() const; /// Return the number of captured regions created for an OpenMP directive. static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind); /// Initialization of the captured region for an OpenMP region. void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope); /// Called for syntactical loops (ForStmt or CXXForRangeStmt) associated with /// an OpenMP loop directive. StmtResult ActOnOpenMPCanonicalLoop(Stmt *AStmt); /// Process a canonical OpenMP loop nest that can either be a canonical /// literal loop (ForStmt or CXXForRangeStmt), or the generated loop of an /// OpenMP loop transformation construct. StmtResult ActOnOpenMPLoopnest(Stmt *AStmt); /// End of OpenMP region. /// /// \param S Statement associated with the current OpenMP region. /// \param Clauses List of clauses for the current OpenMP region. /// /// \returns Statement for finished OpenMP region. StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses); StmtResult ActOnOpenMPExecutableDirective( OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName, OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp parallel' after parsing /// of the associated statement. StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); using VarsWithInheritedDSAType = llvm::SmallDenseMap<const ValueDecl *, const Expr *, 4>; /// Called on well-formed '\#pragma omp simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '#pragma omp tile' after parsing of its clauses and /// the associated statement. StmtResult ActOnOpenMPTileDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '#pragma omp unroll' after parsing of its clauses /// and the associated statement.
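///
/// For example (illustrative; the partial-unroll clause is OpenMP 5.1):
/// \code
///   #pragma omp unroll partial(4)
///   for (int I = 0; I != 128; ++I)
///     Body(I); // Body is an illustrative callee
/// \endcode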
StmtResult ActOnOpenMPUnrollDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp for' after parsing /// of the associated statement. StmtResult ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp for simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp sections' after parsing /// of the associated statement. StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp section' after parsing of the /// associated statement. StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp single' after parsing of the /// associated statement. StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp master' after parsing of the /// associated statement. StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp critical' after parsing of the /// associated statement. StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName, ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp parallel for' after parsing /// of the associated statement. StmtResult ActOnOpenMPParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel for simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel master' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelMasterDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp parallel sections' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp task' after parsing of the /// associated statement. StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskyield'. StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp barrier'. StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskwait'. StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskgroup'. 
StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp flush'. StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp depobj'. StmtResult ActOnOpenMPDepobjDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp scan'. StmtResult ActOnOpenMPScanDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp ordered' after parsing of the /// associated statement. StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp atomic' after parsing of the /// associated statement. StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target' after parsing of the /// associated statement. StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target data' after parsing of /// the associated statement. StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target enter data' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp target exit data' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp target parallel' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target parallel for' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams' after parsing of the /// associated statement. StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp cancellation point'. StmtResult ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// Called on well-formed '\#pragma omp cancel'. StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// Called on well-formed '\#pragma omp taskloop' after parsing of the /// associated statement. 
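///
/// Example usage (sketch; the DSA map collects variables whose data-sharing
/// attributes were inherited from the enclosing context):
///
///  VarsWithInheritedDSAType VarsWithImplicitDSA;
///  StmtResult R = Actions.ActOnOpenMPTaskLoopDirective(
///      Clauses, AStmt, StartLoc, EndLoc, VarsWithImplicitDSA);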
StmtResult ActOnOpenMPTaskLoopDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp taskloop simd' after parsing of /// the associated statement. StmtResult ActOnOpenMPTaskLoopSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp master taskloop' after parsing of the /// associated statement. StmtResult ActOnOpenMPMasterTaskLoopDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp master taskloop simd' after parsing of /// the associated statement. StmtResult ActOnOpenMPMasterTaskLoopSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel master taskloop' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelMasterTaskLoopDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel master taskloop simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelMasterTaskLoopSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute' after parsing /// of the associated statement. StmtResult ActOnOpenMPDistributeDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target update'. StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp distribute parallel for' after /// parsing of the associated statement. StmtResult ActOnOpenMPDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute parallel for simd' /// after parsing of the associated statement. StmtResult ActOnOpenMPDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target parallel for simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target simd' after parsing of /// the associated statement. 
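///
/// Example usage (sketch; arguments are placeholders supplied by the parser):
///
///  VarsWithInheritedDSAType VarsWithImplicitDSA;
///  StmtResult R = Actions.ActOnOpenMPTargetSimdDirective(
///      Clauses, AStmt, StartLoc, EndLoc, VarsWithImplicitDSA);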
StmtResult ActOnOpenMPTargetSimdDirective(
    ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
    SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTeamsDistributeDirective(
    ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
    SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute simd' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeSimdDirective(
    ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
    SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective(
    ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
    SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForDirective(
    ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
    SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses,
                                           Stmt *AStmt,
                                           SourceLocation StartLoc,
                                           SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target teams distribute' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeDirective(
    ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
    SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective(
    ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
    SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for
/// simd' after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective(
    ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
    SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective(
    ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
    SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp interop'.
StmtResult ActOnOpenMPInteropDirective(ArrayRef<OMPClause *> Clauses,
                                       SourceLocation StartLoc,
                                       SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp dispatch' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPDispatchDirective(ArrayRef<OMPClause *> Clauses,
                                        Stmt *AStmt, SourceLocation StartLoc,
                                        SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp masked' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMaskedDirective(ArrayRef<OMPClause *> Clauses,
                                      Stmt *AStmt, SourceLocation StartLoc,
                                      SourceLocation EndLoc);

/// Checks correctness of linear modifiers.
bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind,
                               SourceLocation LinLoc);
/// Checks that the specified declaration matches requirements for the linear
/// decls.
bool CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc,
                           OpenMPLinearClauseKind LinKind, QualType Type,
                           bool IsDeclareSimd = false);

/// Called on well-formed '\#pragma omp declare simd' after parsing of
/// the associated method/function.
DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective(
    DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS,
    Expr *Simdlen, ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds,
    ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears,
    ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR);

/// Checks '\#pragma omp declare variant' variant function and original
/// functions after parsing of the associated method/function.
/// \param DG Function declaration to which declare variant directive is
/// applied to.
/// \param VariantRef Expression that references the variant function, which
/// must be used instead of the original one, specified in \p DG.
/// \param TI The trait info object representing the match clause.
/// \returns None, if the function/variant function are not compatible with
/// the pragma, pair of original function/variant ref expression otherwise.
Optional<std::pair<FunctionDecl *, Expr *>>
checkOpenMPDeclareVariantFunction(DeclGroupPtrTy DG, Expr *VariantRef,
                                  OMPTraitInfo &TI, SourceRange SR);

/// Called on well-formed '\#pragma omp declare variant' after parsing of
/// the associated method/function.
/// \param FD Function declaration to which declare variant directive is
/// applied to.
/// \param VariantRef Expression that references the variant function, which
/// must be used instead of the original one, specified in \p DG.
/// \param TI The context traits associated with the function variant.
void ActOnOpenMPDeclareVariantDirective(
    FunctionDecl *FD, Expr *VariantRef, OMPTraitInfo &TI,
    ArrayRef<Expr *> AdjustArgsNothing,
    ArrayRef<Expr *> AdjustArgsNeedDevicePtr, SourceRange SR);

OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr,
                                       SourceLocation StartLoc,
                                       SourceLocation LParenLoc,
                                       SourceLocation EndLoc);
/// Called on well-formed 'allocator' clause.
OMPClause *ActOnOpenMPAllocatorClause(Expr *Allocator,
                                      SourceLocation StartLoc,
                                      SourceLocation LParenLoc,
                                      SourceLocation EndLoc);
/// Called on well-formed 'if' clause.
OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier,
                               Expr *Condition, SourceLocation StartLoc,
                               SourceLocation LParenLoc,
                               SourceLocation NameModifierLoc,
                               SourceLocation ColonLoc,
                               SourceLocation EndLoc);
/// Called on well-formed 'final' clause.
OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc,
                                  SourceLocation LParenLoc,
                                  SourceLocation EndLoc);
/// Called on well-formed 'num_threads' clause.
OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads,
                                       SourceLocation StartLoc,
                                       SourceLocation LParenLoc,
                                       SourceLocation EndLoc);
/// Called on well-formed 'safelen' clause.
OMPClause *ActOnOpenMPSafelenClause(Expr *Length, SourceLocation StartLoc,
                                    SourceLocation LParenLoc,
                                    SourceLocation EndLoc);
/// Called on well-formed 'simdlen' clause.
OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc,
                                    SourceLocation LParenLoc,
                                    SourceLocation EndLoc);
/// Called on well-formed 'sizes' clause.
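///
/// Example usage (sketch; for 'sizes(2, 4)' the parser would supply the two
/// parsed size expressions in \p SizeExprs):
///
///  OMPClause *C = Actions.ActOnOpenMPSizesClause(SizeExprs, StartLoc,
///                                                LParenLoc, EndLoc);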
OMPClause *ActOnOpenMPSizesClause(ArrayRef<Expr *> SizeExprs,
                                  SourceLocation StartLoc,
                                  SourceLocation LParenLoc,
                                  SourceLocation EndLoc);
/// Called on well-formed 'full' clause.
OMPClause *ActOnOpenMPFullClause(SourceLocation StartLoc,
                                 SourceLocation EndLoc);
/// Called on well-formed 'partial' clause.
OMPClause *ActOnOpenMPPartialClause(Expr *FactorExpr, SourceLocation StartLoc,
                                    SourceLocation LParenLoc,
                                    SourceLocation EndLoc);
/// Called on well-formed 'collapse' clause.
OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops,
                                     SourceLocation StartLoc,
                                     SourceLocation LParenLoc,
                                     SourceLocation EndLoc);
/// Called on well-formed 'ordered' clause.
OMPClause *
ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc,
                         SourceLocation LParenLoc = SourceLocation(),
                         Expr *NumForLoops = nullptr);
/// Called on well-formed 'grainsize' clause.
OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc,
                                      SourceLocation LParenLoc,
                                      SourceLocation EndLoc);
/// Called on well-formed 'num_tasks' clause.
OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc,
                                     SourceLocation LParenLoc,
                                     SourceLocation EndLoc);
/// Called on well-formed 'hint' clause.
OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc,
                                 SourceLocation LParenLoc,
                                 SourceLocation EndLoc);
/// Called on well-formed 'detach' clause.
OMPClause *ActOnOpenMPDetachClause(Expr *Evt, SourceLocation StartLoc,
                                   SourceLocation LParenLoc,
                                   SourceLocation EndLoc);

OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind, unsigned Argument,
                                   SourceLocation ArgumentLoc,
                                   SourceLocation StartLoc,
                                   SourceLocation LParenLoc,
                                   SourceLocation EndLoc);
/// Called on well-formed 'when' clause.
OMPClause *ActOnOpenMPWhenClause(OMPTraitInfo &TI, SourceLocation StartLoc,
                                 SourceLocation LParenLoc,
                                 SourceLocation EndLoc);
/// Called on well-formed 'default' clause.
OMPClause *ActOnOpenMPDefaultClause(llvm::omp::DefaultKind Kind,
                                    SourceLocation KindLoc,
                                    SourceLocation StartLoc,
                                    SourceLocation LParenLoc,
                                    SourceLocation EndLoc);
/// Called on well-formed 'proc_bind' clause.
OMPClause *ActOnOpenMPProcBindClause(llvm::omp::ProcBindKind Kind,
                                     SourceLocation KindLoc,
                                     SourceLocation StartLoc,
                                     SourceLocation LParenLoc,
                                     SourceLocation EndLoc);
/// Called on well-formed 'order' clause.
OMPClause *ActOnOpenMPOrderClause(OpenMPOrderClauseKind Kind,
                                  SourceLocation KindLoc,
                                  SourceLocation StartLoc,
                                  SourceLocation LParenLoc,
                                  SourceLocation EndLoc);
/// Called on well-formed 'update' clause.
OMPClause *ActOnOpenMPUpdateClause(OpenMPDependClauseKind Kind,
                                   SourceLocation KindLoc,
                                   SourceLocation StartLoc,
                                   SourceLocation LParenLoc,
                                   SourceLocation EndLoc);

OMPClause *ActOnOpenMPSingleExprWithArgClause(
    OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr,
    SourceLocation StartLoc, SourceLocation LParenLoc,
    ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc,
    SourceLocation EndLoc);
/// Called on well-formed 'schedule' clause.
OMPClause *ActOnOpenMPScheduleClause(
    OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2,
    OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc,
    SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc,
    SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc);

OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc,
                             SourceLocation EndLoc);
/// Called on well-formed 'nowait' clause.
OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc,
                                   SourceLocation EndLoc);
/// Called on well-formed 'untied' clause.
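///
/// Example usage (sketch; the locations are placeholders for the parsed
/// clause's source range):
///
///  OMPClause *C = Actions.ActOnOpenMPUntiedClause(StartLoc, EndLoc);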
OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc,
                                   SourceLocation EndLoc);
/// Called on well-formed 'mergeable' clause.
OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc,
                                      SourceLocation EndLoc);
/// Called on well-formed 'read' clause.
OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc,
                                 SourceLocation EndLoc);
/// Called on well-formed 'write' clause.
OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc,
                                  SourceLocation EndLoc);
/// Called on well-formed 'update' clause.
OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc,
                                   SourceLocation EndLoc);
/// Called on well-formed 'capture' clause.
OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc,
                                    SourceLocation EndLoc);
/// Called on well-formed 'seq_cst' clause.
OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc,
                                   SourceLocation EndLoc);
/// Called on well-formed 'acq_rel' clause.
OMPClause *ActOnOpenMPAcqRelClause(SourceLocation StartLoc,
                                   SourceLocation EndLoc);
/// Called on well-formed 'acquire' clause.
OMPClause *ActOnOpenMPAcquireClause(SourceLocation StartLoc,
                                    SourceLocation EndLoc);
/// Called on well-formed 'release' clause.
OMPClause *ActOnOpenMPReleaseClause(SourceLocation StartLoc,
                                    SourceLocation EndLoc);
/// Called on well-formed 'relaxed' clause.
OMPClause *ActOnOpenMPRelaxedClause(SourceLocation StartLoc,
                                    SourceLocation EndLoc);
/// Called on well-formed 'init' clause.
OMPClause *ActOnOpenMPInitClause(Expr *InteropVar, ArrayRef<Expr *> PrefExprs,
                                 bool IsTarget, bool IsTargetSync,
                                 SourceLocation StartLoc,
                                 SourceLocation LParenLoc,
                                 SourceLocation VarLoc,
                                 SourceLocation EndLoc);
/// Called on well-formed 'use' clause.
OMPClause *ActOnOpenMPUseClause(Expr *InteropVar, SourceLocation StartLoc,
                                SourceLocation LParenLoc,
                                SourceLocation VarLoc, SourceLocation EndLoc);
/// Called on well-formed 'destroy' clause.
OMPClause *ActOnOpenMPDestroyClause(Expr *InteropVar, SourceLocation StartLoc,
                                    SourceLocation LParenLoc,
                                    SourceLocation VarLoc,
                                    SourceLocation EndLoc);
/// Called on well-formed 'novariants' clause.
OMPClause *ActOnOpenMPNovariantsClause(Expr *Condition,
                                       SourceLocation StartLoc,
                                       SourceLocation LParenLoc,
                                       SourceLocation EndLoc);
/// Called on well-formed 'nocontext' clause.
OMPClause *ActOnOpenMPNocontextClause(Expr *Condition,
                                      SourceLocation StartLoc,
                                      SourceLocation LParenLoc,
                                      SourceLocation EndLoc);
/// Called on well-formed 'filter' clause.
OMPClause *ActOnOpenMPFilterClause(Expr *ThreadID, SourceLocation StartLoc,
                                   SourceLocation LParenLoc,
                                   SourceLocation EndLoc);
/// Called on well-formed 'threads' clause.
OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc,
                                    SourceLocation EndLoc);
/// Called on well-formed 'simd' clause.
OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc,
                                 SourceLocation EndLoc);
/// Called on well-formed 'nogroup' clause.
OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc,
                                    SourceLocation EndLoc);
/// Called on well-formed 'unified_address' clause.
OMPClause *ActOnOpenMPUnifiedAddressClause(SourceLocation StartLoc,
                                           SourceLocation EndLoc);
/// Called on well-formed 'unified_shared_memory' clause.
OMPClause *ActOnOpenMPUnifiedSharedMemoryClause(SourceLocation StartLoc,
                                                SourceLocation EndLoc);
/// Called on well-formed 'reverse_offload' clause.
OMPClause *ActOnOpenMPReverseOffloadClause(SourceLocation StartLoc,
                                           SourceLocation EndLoc);
/// Called on well-formed 'dynamic_allocators' clause.
OMPClause *ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc,
                                              SourceLocation EndLoc);
/// Called on well-formed 'atomic_default_mem_order' clause.
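///
/// Example usage (sketch; the clause kind and locations are placeholders
/// supplied by the parser):
///
///  OMPClause *C = Actions.ActOnOpenMPAtomicDefaultMemOrderClause(
///      OMPC_ATOMIC_DEFAULT_MEM_ORDER_seq_cst, KindLoc, StartLoc, LParenLoc,
///      EndLoc);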
OMPClause *ActOnOpenMPAtomicDefaultMemOrderClause( OpenMPAtomicDefaultMemOrderClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPVarListClause( OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *DepModOrTailExpr, const OMPVarListLocTy &Locs, SourceLocation ColonLoc, CXXScopeSpec &ReductionOrMapperIdScopeSpec, DeclarationNameInfo &ReductionOrMapperId, int ExtraModifier, ArrayRef<OpenMPMapModifierKind> MapTypeModifiers, ArrayRef<SourceLocation> MapTypeModifiersLoc, bool IsMapTypeImplicit, SourceLocation ExtraModifierLoc, ArrayRef<OpenMPMotionModifierKind> MotionModifiers, ArrayRef<SourceLocation> MotionModifiersLoc); /// Called on well-formed 'inclusive' clause. OMPClause *ActOnOpenMPInclusiveClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'exclusive' clause. OMPClause *ActOnOpenMPExclusiveClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'allocate' clause. OMPClause * ActOnOpenMPAllocateClause(Expr *Allocator, ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation ColonLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'private' clause. OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'firstprivate' clause. OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'lastprivate' clause. OMPClause *ActOnOpenMPLastprivateClause( ArrayRef<Expr *> VarList, OpenMPLastprivateModifier LPKind, SourceLocation LPKindLoc, SourceLocation ColonLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'shared' clause. OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'reduction' clause. OMPClause *ActOnOpenMPReductionClause( ArrayRef<Expr *> VarList, OpenMPReductionClauseModifier Modifier, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'task_reduction' clause. OMPClause *ActOnOpenMPTaskReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'in_reduction' clause. OMPClause *ActOnOpenMPInReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'linear' clause. OMPClause * ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step, SourceLocation StartLoc, SourceLocation LParenLoc, OpenMPLinearClauseKind LinKind, SourceLocation LinLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'aligned' clause. 
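///
/// Example usage (sketch; for 'aligned(p : 64)' the parser would supply the
/// variable list and the alignment expression):
///
///  OMPClause *C = Actions.ActOnOpenMPAlignedClause(
///      VarList, AlignmentExpr, StartLoc, LParenLoc, ColonLoc, EndLoc);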
OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList, Expr *Alignment, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'copyin' clause. OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'copyprivate' clause. OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'flush' pseudo clause. OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'depobj' pseudo clause. OMPClause *ActOnOpenMPDepobjClause(Expr *Depobj, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'depend' clause. OMPClause * ActOnOpenMPDependClause(Expr *DepModifier, OpenMPDependClauseKind DepKind, SourceLocation DepLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'device' clause. OMPClause *ActOnOpenMPDeviceClause(OpenMPDeviceClauseModifier Modifier, Expr *Device, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ModifierLoc, SourceLocation EndLoc); /// Called on well-formed 'map' clause. OMPClause *ActOnOpenMPMapClause( ArrayRef<OpenMPMapModifierKind> MapTypeModifiers, ArrayRef<SourceLocation> MapTypeModifiersLoc, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, OpenMPMapClauseKind MapType, bool IsMapTypeImplicit, SourceLocation MapLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs, bool NoDiagnose = false, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'num_teams' clause. OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'thread_limit' clause. OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'priority' clause. OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'dist_schedule' clause. OMPClause *ActOnOpenMPDistScheduleClause( OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc); /// Called on well-formed 'defaultmap' clause. OMPClause *ActOnOpenMPDefaultmapClause( OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc, SourceLocation KindLoc, SourceLocation EndLoc); /// Called on well-formed 'to' clause. OMPClause * ActOnOpenMPToClause(ArrayRef<OpenMPMotionModifierKind> MotionModifiers, ArrayRef<SourceLocation> MotionModifiersLoc, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'from' clause. 
OMPClause *
ActOnOpenMPFromClause(ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
                      ArrayRef<SourceLocation> MotionModifiersLoc,
                      CXXScopeSpec &MapperIdScopeSpec,
                      DeclarationNameInfo &MapperId,
                      SourceLocation ColonLoc, ArrayRef<Expr *> VarList,
                      const OMPVarListLocTy &Locs,
                      ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'use_device_ptr' clause.
OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList,
                                         const OMPVarListLocTy &Locs);
/// Called on well-formed 'use_device_addr' clause.
OMPClause *ActOnOpenMPUseDeviceAddrClause(ArrayRef<Expr *> VarList,
                                          const OMPVarListLocTy &Locs);
/// Called on well-formed 'is_device_ptr' clause.
OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList,
                                        const OMPVarListLocTy &Locs);
/// Called on well-formed 'nontemporal' clause.
OMPClause *ActOnOpenMPNontemporalClause(ArrayRef<Expr *> VarList,
                                        SourceLocation StartLoc,
                                        SourceLocation LParenLoc,
                                        SourceLocation EndLoc);

/// Data for list of allocators.
struct UsesAllocatorsData {
  /// Allocator.
  Expr *Allocator = nullptr;
  /// Allocator traits.
  Expr *AllocatorTraits = nullptr;
  /// Locations of '(' and ')' symbols.
  SourceLocation LParenLoc, RParenLoc;
};
/// Called on well-formed 'uses_allocators' clause.
OMPClause *ActOnOpenMPUsesAllocatorClause(SourceLocation StartLoc,
                                          SourceLocation LParenLoc,
                                          SourceLocation EndLoc,
                                          ArrayRef<UsesAllocatorsData> Data);
/// Called on well-formed 'affinity' clause.
OMPClause *ActOnOpenMPAffinityClause(SourceLocation StartLoc,
                                     SourceLocation LParenLoc,
                                     SourceLocation ColonLoc,
                                     SourceLocation EndLoc, Expr *Modifier,
                                     ArrayRef<Expr *> Locators);

/// The kind of conversion being performed.
enum CheckedConversionKind {
  /// An implicit conversion.
  CCK_ImplicitConversion,
  /// A C-style cast.
  CCK_CStyleCast,
  /// A functional-style cast.
  CCK_FunctionalCast,
  /// A cast other than a C-style cast.
  CCK_OtherCast,
  /// A conversion for an operand of a builtin overloaded operator.
  CCK_ForBuiltinOverloadedOp
};

static bool isCast(CheckedConversionKind CCK) {
  return CCK == CCK_CStyleCast || CCK == CCK_FunctionalCast ||
         CCK == CCK_OtherCast;
}

/// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit
/// cast. If there is already an implicit cast, merge into the existing one.
/// If isLvalue, the result of the cast is an lvalue.
ExprResult
ImpCastExprToType(Expr *E, QualType Type, CastKind CK,
                  ExprValueKind VK = VK_PRValue,
                  const CXXCastPath *BasePath = nullptr,
                  CheckedConversionKind CCK = CCK_ImplicitConversion);

/// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
/// to the conversion from scalar type ScalarTy to the Boolean type.
static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy);

/// IgnoredValueConversions - Given that an expression's result is
/// syntactically ignored, perform any conversions that are
/// required.
ExprResult IgnoredValueConversions(Expr *E);

// UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts
// functions and arrays to their respective pointers (C99 6.3.2.1).
ExprResult UsualUnaryConversions(Expr *E);

/// CallExprUnaryConversions - a special case of a unary conversion
/// performed on a function designator of a call expression.
ExprResult CallExprUnaryConversions(Expr *E);

// DefaultFunctionArrayConversion - converts functions and arrays
// to their respective pointers (C99 6.3.2.1).
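//
// Example usage (sketch; `E` is a placeholder for an already-parsed
// expression of function or array type):
//
//   ExprResult Res = DefaultFunctionArrayConversion(E);
//   if (Res.isInvalid())
//     return ExprError();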
ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true);

// DefaultFunctionArrayLvalueConversion - converts functions and
// arrays to their respective pointers and performs the
// lvalue-to-rvalue conversion.
ExprResult DefaultFunctionArrayLvalueConversion(Expr *E,
                                                bool Diagnose = true);

// DefaultLvalueConversion - performs lvalue-to-rvalue conversion on
// the operand. This function is a no-op if the operand has a function type
// or an array type.
ExprResult DefaultLvalueConversion(Expr *E);

// DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that
// do not have a prototype. Integer promotions are performed on each
// argument, and arguments that have type float are promoted to double.
ExprResult DefaultArgumentPromotion(Expr *E);

/// If \p E is a prvalue denoting an unmaterialized temporary, materialize
/// it as an xvalue. In C++98, the result will still be a prvalue, because
/// we don't have xvalues there.
ExprResult TemporaryMaterializationConversion(Expr *E);

// Used for emitting the right warning by DefaultVariadicArgumentPromotion
enum VariadicCallType {
  VariadicFunction,
  VariadicBlock,
  VariadicMethod,
  VariadicConstructor,
  VariadicDoesNotApply
};

VariadicCallType getVariadicCallType(FunctionDecl *FDecl,
                                     const FunctionProtoType *Proto,
                                     Expr *Fn);

// Used for determining in which context a type is allowed to be passed to a
// vararg function.
enum VarArgKind {
  VAK_Valid,
  VAK_ValidInCXX11,
  VAK_Undefined,
  VAK_MSVCUndefined,
  VAK_Invalid
};

// Determines which VarArgKind fits an expression.
VarArgKind isValidVarArgType(const QualType &Ty);

/// Check to see if the given expression is a valid argument to a variadic
/// function, issuing a diagnostic if not.
void checkVariadicArgument(const Expr *E, VariadicCallType CT);

/// Check whether the given statement can have musttail applied to it,
/// issuing a diagnostic and returning false if not. In the success case,
/// the statement is rewritten to remove implicit nodes from the return
/// value.
bool checkAndRewriteMustTailAttr(Stmt *St, const Attr &MTA);

private:
/// Check whether the given statement can have musttail applied to it,
/// issuing a diagnostic and returning false if not.
bool checkMustTailAttr(const Stmt *St, const Attr &MTA);

public:
/// Check to see if a given expression could have '.c_str()' called on it.
bool hasCStrMethod(const Expr *E);

/// GatherArgumentsForCall - Collects argument expressions for various
/// forms of call prototypes.
bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl,
                            const FunctionProtoType *Proto,
                            unsigned FirstParam, ArrayRef<Expr *> Args,
                            SmallVectorImpl<Expr *> &AllArgs,
                            VariadicCallType CallType = VariadicDoesNotApply,
                            bool AllowExplicit = false,
                            bool IsListInitialization = false);

// DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
// will create a runtime trap if the resulting type is not a POD type.
ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
                                            FunctionDecl *FDecl);

/// Context in which we're performing a usual arithmetic conversion.
enum ArithConvKind {
  /// An arithmetic operation.
  ACK_Arithmetic,
  /// A bitwise operation.
  ACK_BitwiseOp,
  /// A comparison.
  ACK_Comparison,
  /// A conditional (?:) operator.
  ACK_Conditional,
  /// A compound assignment expression.
  ACK_CompAssign,
};

// UsualArithmeticConversions - performs the UsualUnaryConversions on its
// operands and then handles various conversions that are common to binary
// operators (C99 6.3.1.8). If both operands aren't arithmetic, this
// routine returns the first non-arithmetic type found. The client is
// responsible for emitting appropriate error diagnostics.
QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
                                    SourceLocation Loc, ArithConvKind ACK);

/// AssignConvertType - All of the 'assignment' semantic checks return this
/// enum to indicate whether the assignment was allowed. These checks are
/// done for simple assignments, as well as initialization, return from
/// function, argument passing, etc. The query is phrased in terms of a
/// source and destination type.
enum AssignConvertType {
  /// Compatible - the types are compatible according to the standard.
  Compatible,

  /// PointerToInt - The assignment converts a pointer to an int, which we
  /// accept as an extension.
  PointerToInt,

  /// IntToPointer - The assignment converts an int to a pointer, which we
  /// accept as an extension.
  IntToPointer,

  /// FunctionVoidPointer - The assignment is between a function pointer and
  /// void*, which the standard doesn't allow, but we accept as an extension.
  FunctionVoidPointer,

  /// IncompatiblePointer - The assignment is between two pointer types that
  /// are not compatible, but we accept them as an extension.
  IncompatiblePointer,

  /// IncompatibleFunctionPointer - The assignment is between two function
  /// pointer types that are not compatible, but we accept them as an
  /// extension.
  IncompatibleFunctionPointer,

  /// IncompatiblePointerSign - The assignment is between two pointer types
  /// which point to integers which have a different sign, but are otherwise
  /// identical. This is a subset of the above, but broken out because it's by
  /// far the most common case of incompatible pointers.
  IncompatiblePointerSign,

  /// CompatiblePointerDiscardsQualifiers - The assignment discards
  /// c/v/r qualifiers, which we accept as an extension.
  CompatiblePointerDiscardsQualifiers,

  /// IncompatiblePointerDiscardsQualifiers - The assignment
  /// discards qualifiers that we don't permit to be discarded,
  /// like address spaces.
  IncompatiblePointerDiscardsQualifiers,

  /// IncompatibleNestedPointerAddressSpaceMismatch - The assignment
  /// changes address spaces in nested pointer types which is not allowed.
  /// For instance, converting __private int ** to __generic int ** is
  /// illegal even though __private could be converted to __generic.
  IncompatibleNestedPointerAddressSpaceMismatch,

  /// IncompatibleNestedPointerQualifiers - The assignment is between two
  /// nested pointer types, and the qualifiers other than the first two
  /// levels differ e.g. char ** -> const char **, but we accept them as an
  /// extension.
  IncompatibleNestedPointerQualifiers,

  /// IncompatibleVectors - The assignment is between two vector types that
  /// have the same size, which we accept as an extension.
  IncompatibleVectors,

  /// IntToBlockPointer - The assignment converts an int to a block
  /// pointer. We disallow this.
  IntToBlockPointer,

  /// IncompatibleBlockPointer - The assignment is between two block
  /// pointer types that are not compatible.
  IncompatibleBlockPointer,

  /// IncompatibleObjCQualifiedId - The assignment is between a qualified
  /// id type and something else (that is incompatible with it). For example,
  /// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol.
  IncompatibleObjCQualifiedId,

  /// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an
  /// object with __weak qualifier.
IncompatibleObjCWeakRef, /// Incompatible - We reject this conversion outright, it is invalid to /// represent it in the AST. Incompatible }; /// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the /// assignment conversion type specified by ConvTy. This returns true if the /// conversion was invalid or false if the conversion was accepted. bool DiagnoseAssignmentResult(AssignConvertType ConvTy, SourceLocation Loc, QualType DstType, QualType SrcType, Expr *SrcExpr, AssignmentAction Action, bool *Complained = nullptr); /// IsValueInFlagEnum - Determine if a value is allowed as part of a flag /// enum. If AllowMask is true, then we also allow the complement of a valid /// value, to be used as a mask. bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val, bool AllowMask) const; /// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant /// integer not in the range of enum values. void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType, Expr *SrcExpr); /// CheckAssignmentConstraints - Perform type checking for assignment, /// argument passing, variable initialization, and function return values. /// C99 6.5.16. AssignConvertType CheckAssignmentConstraints(SourceLocation Loc, QualType LHSType, QualType RHSType); /// Check assignment constraints and optionally prepare for a conversion of /// the RHS to the LHS type. The conversion is prepared for if ConvertRHS /// is true. AssignConvertType CheckAssignmentConstraints(QualType LHSType, ExprResult &RHS, CastKind &Kind, bool ConvertRHS = true); /// Check assignment constraints for an assignment of RHS to LHSType. /// /// \param LHSType The destination type for the assignment. /// \param RHS The source expression for the assignment. /// \param Diagnose If \c true, diagnostics may be produced when checking /// for assignability. If a diagnostic is produced, \p RHS will be /// set to ExprError(). Note that this function may still return /// without producing a diagnostic, even for an invalid assignment. /// \param DiagnoseCFAudited If \c true, the target is a function parameter /// in an audited Core Foundation API and does not need to be checked /// for ARC retain issues. /// \param ConvertRHS If \c true, \p RHS will be updated to model the /// conversions necessary to perform the assignment. If \c false, /// \p Diagnose must also be \c false. AssignConvertType CheckSingleAssignmentConstraints( QualType LHSType, ExprResult &RHS, bool Diagnose = true, bool DiagnoseCFAudited = false, bool ConvertRHS = true); // If the lhs type is a transparent union, check whether we // can initialize the transparent union with the given expression. 
AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType, ExprResult &RHS); bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType); bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit = false); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const ImplicitConversionSequence& ICS, AssignmentAction Action, CheckedConversionKind CCK = CCK_ImplicitConversion); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const StandardConversionSequence& SCS, AssignmentAction Action, CheckedConversionKind CCK); ExprResult PerformQualificationConversion( Expr *E, QualType Ty, ExprValueKind VK = VK_PRValue, CheckedConversionKind CCK = CCK_ImplicitConversion); /// the following "Check" methods will return a valid/converted QualType /// or a null QualType (indicating an error diagnostic was issued). /// type checking binary operators (subroutines of CreateBuiltinBinOp). QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType CheckPointerToMemberOperands( // C++ 5.5 ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, SourceLocation OpLoc, bool isIndirect); QualType CheckMultiplyDivideOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool IsDivide); QualType CheckRemainderOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign = false); QualType CheckAdditionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc, QualType* CompLHSTy = nullptr); QualType CheckSubtractionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, QualType* CompLHSTy = nullptr); QualType CheckShiftOperands( // C99 6.5.7 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc, bool IsCompAssign = false); void CheckPtrComparisonWithNullChar(ExprResult &E, ExprResult &NullE); QualType CheckCompareOperands( // C99 6.5.8/9 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckBitwiseOperands( // C99 6.5.[10...12] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckLogicalOperands( // C99 6.5.[13,14] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); // CheckAssignmentOperands is used for both simple and compound assignment. // For simple assignment, pass both expressions and a null converted type. // For compound assignment, pass both expressions and the converted type. 
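//
// Example usage (sketch; for a simple assignment 'a = b', following the
// convention above of passing a null type for simple assignments):
//
//   QualType ResTy = CheckAssignmentOperands(LHSExpr, RHS, OpLoc,
//                                            /*CompoundType=*/QualType());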
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2] Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType); ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opcode, Expr *Op); ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opcode, Expr *LHS, Expr *RHS); ExprResult checkPseudoObjectRValue(Expr *E); Expr *recreateSyntacticForm(PseudoObjectExpr *E); QualType CheckConditionalOperands( // C99 6.5.15 ExprResult &Cond, ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc); QualType CXXCheckConditionalOperands( // C++ 5.16 ExprResult &cond, ExprResult &lhs, ExprResult &rhs, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc); QualType CheckVectorConditionalTypes(ExprResult &Cond, ExprResult &LHS, ExprResult &RHS, SourceLocation QuestionLoc); QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2, bool ConvertArgs = true); QualType FindCompositePointerType(SourceLocation Loc, ExprResult &E1, ExprResult &E2, bool ConvertArgs = true) { Expr *E1Tmp = E1.get(), *E2Tmp = E2.get(); QualType Composite = FindCompositePointerType(Loc, E1Tmp, E2Tmp, ConvertArgs); E1 = E1Tmp; E2 = E2Tmp; return Composite; } QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS, SourceLocation QuestionLoc); bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr, SourceLocation QuestionLoc); void DiagnoseAlwaysNonNullPointer(Expr *E, Expr::NullPointerConstantKind NullType, bool IsEqual, SourceRange Range); /// type checking for vector binary operators. QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool AllowBothBool, bool AllowBoolConversion); QualType GetSignedVectorType(QualType V); QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc); /// Type checking for matrix binary operators. QualType CheckMatrixElementwiseOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign); QualType CheckMatrixMultiplyOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign); bool isValidSveBitcast(QualType srcType, QualType destType); bool areMatrixTypesOfTheSameDimension(QualType srcTy, QualType destTy); bool areVectorTypesSameSize(QualType srcType, QualType destType); bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType); bool isLaxVectorConversion(QualType srcType, QualType destType); /// type checking declaration initializers (C99 6.7.8) bool CheckForConstantInitializer(Expr *e, QualType t); // type checking C++ declaration initializers (C++ [dcl.init]). /// ReferenceCompareResult - Expresses the result of comparing two /// types (cv1 T1 and cv2 T2) to determine their compatibility for the /// purposes of initialization by reference (C++ [dcl.init.ref]p4). enum ReferenceCompareResult { /// Ref_Incompatible - The two types are incompatible, so direct /// reference binding is not possible. Ref_Incompatible = 0, /// Ref_Related - The two types are reference-related, which means /// that their unqualified forms (T1 and T2) are either the same /// or T1 is a base class of T2. Ref_Related, /// Ref_Compatible - The two types are reference-compatible. Ref_Compatible }; // Fake up a scoped enumeration that still contextually converts to bool. 
struct ReferenceConversionsScope {
  /// The conversions that would be performed on an lvalue of type T2 when
  /// binding a reference of type T1 to it, as determined when evaluating
  /// whether T1 is reference-compatible with T2.
  enum ReferenceConversions {
    Qualification = 0x1,
    NestedQualification = 0x2,
    Function = 0x4,
    DerivedToBase = 0x8,
    ObjC = 0x10,
    ObjCLifetime = 0x20,

    LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/ObjCLifetime)
  };
};
using ReferenceConversions = ReferenceConversionsScope::ReferenceConversions;

ReferenceCompareResult
CompareReferenceRelationship(SourceLocation Loc, QualType T1, QualType T2,
                             ReferenceConversions *Conv = nullptr);

ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType,
                               Expr *CastExpr, CastKind &CastKind,
                               ExprValueKind &VK, CXXCastPath &Path);

/// Force an expression with unknown-type to an expression of the
/// given type.
ExprResult forceUnknownAnyToType(Expr *E, QualType ToType);

/// Type-check an expression that's being passed to an
/// __unknown_anytype parameter.
ExprResult checkUnknownAnyArg(SourceLocation callLoc, Expr *result,
                              QualType &paramType);

// CheckMatrixCast - Check type constraints for matrix casts.
// We allow casting between matrices of the same dimensions, i.e. when they
// have the same number of rows and columns. Returns true if the cast is
// invalid.
bool CheckMatrixCast(SourceRange R, QualType DestTy, QualType SrcTy,
                     CastKind &Kind);

// CheckVectorCast - check type constraints for vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size.
// returns true if the cast is invalid
bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty,
                     CastKind &Kind);

/// Prepare `SplattedExpr` for a vector splat operation, adding
/// implicit casts if necessary.
ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr);

// CheckExtVectorCast - check type constraints for extended vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size,
// or vectors and the element type of that vector.
// returns the cast expr
ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr,
                              CastKind &Kind);

ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type,
                                      SourceLocation LParenLoc,
                                      Expr *CastExpr,
                                      SourceLocation RParenLoc);

enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error };

/// Checks for invalid conversions and casts between
/// retainable pointers and other pointer kinds for ARC and Weak.
ARCConversionResult CheckObjCConversion(SourceRange castRange,
                                        QualType castType, Expr *&op,
                                        CheckedConversionKind CCK,
                                        bool Diagnose = true,
                                        bool DiagnoseCFAudited = false,
                                        BinaryOperatorKind Opc = BO_PtrMemD);

Expr *stripARCUnbridgedCast(Expr *e);
void diagnoseARCUnbridgedCast(Expr *e);

bool CheckObjCARCUnavailableWeakConversion(QualType castType,
                                           QualType ExprType);

/// checkRetainCycles - Check whether an Objective-C message send
/// might create an obvious retain cycle.
void checkRetainCycles(ObjCMessageExpr *msg);
void checkRetainCycles(Expr *receiver, Expr *argument);
void checkRetainCycles(VarDecl *Var, Expr *Init);

/// checkUnsafeAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained type.
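///
/// Example usage (sketch; `Loc`, `LHSType`, and `RHSExpr` are placeholders
/// for the assignment being checked under ARC):
///
///  (void)checkUnsafeAssigns(Loc, LHSType, RHSExpr);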
bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS); /// checkUnsafeExprAssigns - Check whether +1 expr is being assigned /// to weak/__unsafe_unretained expression. void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS); /// CheckMessageArgumentTypes - Check types in an Obj-C message send. /// \param Method - May be null. /// \param [out] ReturnType - The return type of the send. /// \return true iff there were any incompatible types. bool CheckMessageArgumentTypes(const Expr *Receiver, QualType ReceiverType, MultiExprArg Args, Selector Sel, ArrayRef<SourceLocation> SelectorLocs, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage, SourceLocation lbrac, SourceLocation rbrac, SourceRange RecRange, QualType &ReturnType, ExprValueKind &VK); /// Determine the result of a message send expression based on /// the type of the receiver, the method expected to receive the message, /// and the form of the message send. QualType getMessageSendResultType(const Expr *Receiver, QualType ReceiverType, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage); /// If the given expression involves a message send to a method /// with a related result type, emit a note describing what happened. void EmitRelatedResultTypeNote(const Expr *E); /// Given that we had incompatible pointer types in a return /// statement, check whether we're in a method with a related result /// type, and if so, emit a note describing what happened. void EmitRelatedResultTypeNoteForReturn(QualType destType); class ConditionResult { Decl *ConditionVar; FullExprArg Condition; bool Invalid; bool HasKnownValue; bool KnownValue; friend class Sema; ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition, bool IsConstexpr) : ConditionVar(ConditionVar), Condition(Condition), Invalid(false), HasKnownValue(IsConstexpr && Condition.get() && !Condition.get()->isValueDependent()), KnownValue(HasKnownValue && !!Condition.get()->EvaluateKnownConstInt(S.Context)) {} explicit ConditionResult(bool Invalid) : ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid), HasKnownValue(false), KnownValue(false) {} public: ConditionResult() : ConditionResult(false) {} bool isInvalid() const { return Invalid; } std::pair<VarDecl *, Expr *> get() const { return std::make_pair(cast_or_null<VarDecl>(ConditionVar), Condition.get()); } llvm::Optional<bool> getKnownValue() const { if (!HasKnownValue) return None; return KnownValue; } }; static ConditionResult ConditionError() { return ConditionResult(true); } enum class ConditionKind { Boolean, ///< A boolean condition, from 'if', 'while', 'for', or 'do'. ConstexprIf, ///< A constant boolean condition from 'if constexpr'. Switch ///< An integral condition for a 'switch' statement. }; ConditionResult ActOnCondition(Scope *S, SourceLocation Loc, Expr *SubExpr, ConditionKind CK); ConditionResult ActOnConditionVariable(Decl *ConditionVar, SourceLocation StmtLoc, ConditionKind CK); DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D); ExprResult CheckConditionVariable(VarDecl *ConditionVar, SourceLocation StmtLoc, ConditionKind CK); ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond); /// CheckBooleanCondition - Diagnose problems involving the use of /// the given expression as a boolean condition (e.g. in an if /// statement). Also performs the standard function and array /// decays, possibly changing the input variable. /// /// \param Loc - A location associated with the condition, e.g. the /// 'if' keyword. 
/// \return true iff there were any errors
ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E,
                                 bool IsConstexpr = false);

/// ActOnExplicitBoolSpecifier - Build an ExplicitSpecifier from an expression
/// found in an explicit(bool) specifier.
ExplicitSpecifier ActOnExplicitBoolSpecifier(Expr *E);

/// tryResolveExplicitSpecifier - Attempt to resolve the explicit specifier.
/// Returns true if the explicit specifier is now resolved.
bool tryResolveExplicitSpecifier(ExplicitSpecifier &ExplicitSpec);

/// DiagnoseAssignmentAsCondition - Given that an expression is
/// being used as a boolean condition, warn if it's an assignment.
void DiagnoseAssignmentAsCondition(Expr *E);

/// Redundant parentheses over an equality comparison can indicate
/// that the user intended an assignment used as condition.
void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE);

/// CheckCXXBooleanCondition - Performs the conversion of the expression to
/// bool; the result is invalid if the conversion failed.
ExprResult CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr = false);

/// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have
/// the specified width and sign. If an overflow occurs, detect it and emit
/// the specified diagnostic.
void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal,
                                        unsigned NewWidth, bool NewSign,
                                        SourceLocation Loc, unsigned DiagID);

/// Checks that the Objective-C declaration is declared in the global scope.
/// Emits an error and marks the declaration as invalid if it's not declared
/// in the global scope.
bool CheckObjCDeclScope(Decl *D);

/// Abstract base class used for diagnosing integer constant
/// expression violations.
class VerifyICEDiagnoser {
public:
  bool Suppress;

  VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { }

  virtual SemaDiagnosticBuilder
  diagnoseNotICEType(Sema &S, SourceLocation Loc, QualType T);
  virtual SemaDiagnosticBuilder diagnoseNotICE(Sema &S,
                                               SourceLocation Loc) = 0;
  virtual SemaDiagnosticBuilder diagnoseFold(Sema &S, SourceLocation Loc);
  virtual ~VerifyICEDiagnoser() {}
};

enum AllowFoldKind {
  NoFold,
  AllowFold,
};

/// VerifyIntegerConstantExpression - Verifies that an expression is an ICE,
/// and reports the appropriate diagnostics. Returns false on success.
/// Can optionally return the value of the expression.
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
                                           VerifyICEDiagnoser &Diagnoser,
                                           AllowFoldKind CanFold = NoFold);
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
                                           unsigned DiagID,
                                           AllowFoldKind CanFold = NoFold);
ExprResult VerifyIntegerConstantExpression(Expr *E,
                                           llvm::APSInt *Result = nullptr,
                                           AllowFoldKind CanFold = NoFold);
ExprResult VerifyIntegerConstantExpression(Expr *E,
                                           AllowFoldKind CanFold = NoFold) {
  return VerifyIntegerConstantExpression(E, nullptr, CanFold);
}

/// VerifyBitField - verifies that a bit field expression is an ICE and has
/// the correct width, and that the field type is valid.
/// Returns false on success.
/// Can optionally return whether the bit-field is of width 0
ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName,
                          QualType FieldTy, bool IsMsStruct, Expr *BitWidth,
                          bool *ZeroWidth = nullptr);

private:
  unsigned ForceCUDAHostDeviceDepth = 0;

public:
/// Increments our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. So long as this count is greater
/// than zero, all functions encountered will be __host__ __device__.
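///
/// Example usage (sketch; mirrors how a pragma handler might pair the
/// push/pop calls; the diagnostic ID is hypothetical):
///
///  Actions.PushForceCUDAHostDevice();
///  // ... handle declarations that should become __host__ __device__ ...
///  if (!Actions.PopForceCUDAHostDevice())
///    Diag(Loc, diag::err_unbalanced_pop); // hypothetical diagnostic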
void PushForceCUDAHostDevice(); /// Decrements our count of the number of times we've seen a pragma forcing /// functions to be __host__ __device__. Returns false if the count is 0 /// before decrementing, so you can emit an error. bool PopForceCUDAHostDevice(); /// Diagnostics that are emitted only if we discover that the given function /// must be codegen'ed. Because handling these correctly adds overhead to /// compilation, this is currently only enabled for CUDA compilations. llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>, std::vector<PartialDiagnosticAt>> DeviceDeferredDiags; /// A pair of a canonical FunctionDecl and a SourceLocation. When used as the /// key in a hashtable, both the FD and location are hashed. struct FunctionDeclAndLoc { CanonicalDeclPtr<FunctionDecl> FD; SourceLocation Loc; }; /// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a /// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting the /// same deferred diag twice. llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags; /// An inverse call graph, mapping known-emitted functions to one of their /// known-emitted callers (plus the location of the call). /// /// Functions that we can tell a priori must be emitted aren't added to this /// map. llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>, /* Caller = */ FunctionDeclAndLoc> DeviceKnownEmittedFns; /// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current /// context is "used as device code". /// /// - If CurContext is a __host__ function, does not emit any diagnostics /// unless \p EmitOnBothSides is true. /// - If CurContext is a __device__ or __global__ function, emits the /// diagnostics immediately. /// - If CurContext is a __host__ __device__ function and we are compiling for /// the device, creates a diagnostic which is emitted if and when we realize /// that the function will be codegen'ed. /// /// Example usage: /// /// // Variable-length arrays are not allowed in CUDA device code. /// if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget()) /// return ExprError(); /// // Otherwise, continue parsing as normal. SemaDiagnosticBuilder CUDADiagIfDeviceCode(SourceLocation Loc, unsigned DiagID); /// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current /// context is "used as host code". /// /// Same as CUDADiagIfDeviceCode, with "host" and "device" switched. SemaDiagnosticBuilder CUDADiagIfHostCode(SourceLocation Loc, unsigned DiagID); /// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current /// context is "used as device code". /// /// - If CurContext is a `declare target` function or it is known that the /// function is emitted for the device, emits the diagnostics immediately. /// - If CurContext is a non-`declare target` function and we are compiling /// for the device, creates a diagnostic which is emitted if and when we /// realize that the function will be codegen'ed. /// /// Example usage: /// /// // Variable-length arrays are not allowed in NVPTX device code. /// if (diagIfOpenMPDeviceCode(Loc, diag::err_vla_unsupported)) /// return ExprError(); /// // Otherwise, continue parsing as normal. SemaDiagnosticBuilder diagIfOpenMPDeviceCode(SourceLocation Loc, unsigned DiagID, FunctionDecl *FD); /// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current /// context is "used as host code".
/// /// - If CurContext is a `declare target` function or it is known that the /// function is emitted for the host, emits the diagnostics immediately. /// - If CurContext is a non-host function, just ignore it. /// /// Example usage: /// /// // Variable-length arrays are not allowed in NVPTX device code. /// if (diagIfOpenMPHostCode(Loc, diag::err_vla_unsupported)) /// return ExprError(); /// // Otherwise, continue parsing as normal. SemaDiagnosticBuilder diagIfOpenMPHostCode(SourceLocation Loc, unsigned DiagID, FunctionDecl *FD); SemaDiagnosticBuilder targetDiag(SourceLocation Loc, unsigned DiagID, FunctionDecl *FD = nullptr); SemaDiagnosticBuilder targetDiag(SourceLocation Loc, const PartialDiagnostic &PD, FunctionDecl *FD = nullptr) { return targetDiag(Loc, PD.getDiagID(), FD) << PD; } /// Check if the type is allowed to be used for the current target. void checkTypeSupport(QualType Ty, SourceLocation Loc, ValueDecl *D = nullptr); enum CUDAFunctionTarget { CFT_Device, CFT_Global, CFT_Host, CFT_HostDevice, CFT_InvalidTarget }; /// Determines whether the given function is a CUDA device/host/kernel/etc. /// function. /// /// Use this rather than examining the function's attributes yourself -- you /// will get it wrong. Returns CFT_Host if D is null. CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D, bool IgnoreImplicitHDAttr = false); CUDAFunctionTarget IdentifyCUDATarget(const ParsedAttributesView &Attrs); enum CUDAVariableTarget { CVT_Device, /// Emitted on device side with a shadow variable on host side CVT_Host, /// Emitted on host side only CVT_Both, /// Emitted on both sides with different addresses CVT_Unified, /// Emitted as a unified address, e.g. managed variables }; /// Determines whether the given variable is emitted on host or device side. CUDAVariableTarget IdentifyCUDATarget(const VarDecl *D); /// Gets the CUDA target for the current context. CUDAFunctionTarget CurrentCUDATarget() { return IdentifyCUDATarget(dyn_cast<FunctionDecl>(CurContext)); } static bool isCUDAImplicitHostDeviceFunction(const FunctionDecl *D); // CUDA function call preference. Must be ordered numerically from // worst to best. enum CUDAFunctionPreference { CFP_Never, // Invalid caller/callee combination. CFP_WrongSide, // Calls from host-device to host or device // function that do not match current compilation // mode. CFP_HostDevice, // Any calls to host/device functions. CFP_SameSide, // Calls from host-device to host or device // function matching current compilation mode. CFP_Native, // host-to-host or device-to-device calls. }; /// Identifies relative preference of a given Caller/Callee /// combination, based on their host/device attributes. /// \param Caller function which needs address of \p Callee. /// nullptr in case of global context. /// \param Callee target function /// /// \returns preference value for particular Caller/Callee combination. CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller, const FunctionDecl *Callee); /// Determines whether Caller may invoke Callee, based on their CUDA /// host/device attributes. Returns false if the call is not allowed. /// /// Note: Will return true for CFP_WrongSide calls. These may appear in /// semantically correct CUDA programs, but only if they're never codegen'ed.
bool IsAllowedCUDACall(const FunctionDecl *Caller, const FunctionDecl *Callee) { return IdentifyCUDAPreference(Caller, Callee) != CFP_Never; } /// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD, /// depending on FD and the current compilation settings. void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD, const LookupResult &Previous); /// May add implicit CUDAConstantAttr attribute to VD, depending on VD /// and current compilation settings. void MaybeAddCUDAConstantAttr(VarDecl *VD); public: /// Check whether we're allowed to call Callee from the current context. /// /// - If the call is never allowed in a semantically-correct program /// (CFP_Never), emits an error and returns false. /// /// - If the call is allowed in semantically-correct programs, but only if /// it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to /// be emitted if and when the caller is codegen'ed, and returns true. /// /// Will only create deferred diagnostics for a given SourceLocation once, /// so you can safely call this multiple times without generating duplicate /// deferred errors. /// /// - Otherwise, returns true without emitting any diagnostics. bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee); void CUDACheckLambdaCapture(CXXMethodDecl *D, const sema::Capture &Capture); /// Set __device__ or __host__ __device__ attributes on the given lambda /// operator() method. /// /// CUDA lambdas are host device functions by default unless they have an /// explicit host or device attribute. void CUDASetLambdaAttrs(CXXMethodDecl *Method); /// Finds a function in \p Matches with highest calling priority /// from \p Caller context and erases all functions with lower /// calling priority. void EraseUnwantedCUDAMatches( const FunctionDecl *Caller, SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches); /// Given an implicit special member, infer its CUDA target from the /// calls it needs to make to underlying base/field special members. /// \param ClassDecl the class for which the member is being created. /// \param CSM the kind of special member. /// \param MemberDecl the special member itself. /// \param ConstRHS true if this is a copy operation with a const object on /// its RHS. /// \param Diagnose true if this call should emit diagnostics. /// \return true if there was an error inferring. /// The result of this call is implicit CUDA target attribute(s) attached to /// the member declaration. bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl, CXXSpecialMember CSM, CXXMethodDecl *MemberDecl, bool ConstRHS, bool Diagnose); /// \return true if \p CD can be considered empty according to CUDA /// (E.2.3.1 in CUDA 7.5 Programming guide). bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD); bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD); // \brief Checks that initializers of \p Var satisfy CUDA restrictions. In // case of error emits appropriate diagnostic and invalidates \p Var. // // \details CUDA allows only empty constructors as initializers for global // variables (see E.2.3.1, CUDA 7.5). The same restriction also applies to all // __shared__ variables whether they are local or not (they all are implicitly // static in CUDA). One exception is that CUDA allows constant initializers // for __constant__ and __device__ variables. void checkAllowedCUDAInitializer(VarDecl *VD); /// Check whether NewFD is a valid overload for CUDA. Emits /// diagnostics and invalidates NewFD if not.
void checkCUDATargetOverload(FunctionDecl *NewFD, const LookupResult &Previous); /// Copies target attributes from the template TD to the function FD. void inheritCUDATargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD); /// Returns the name of the launch configuration function. This is the name /// of the function that will be called to configure kernel call, with the /// parameters specified via <<<>>>. std::string getCudaConfigureFuncName() const; /// \name Code completion //@{ /// Describes the context in which code completion occurs. enum ParserCompletionContext { /// Code completion occurs at top-level or namespace context. PCC_Namespace, /// Code completion occurs within a class, struct, or union. PCC_Class, /// Code completion occurs within an Objective-C interface, protocol, /// or category. PCC_ObjCInterface, /// Code completion occurs within an Objective-C implementation or /// category implementation PCC_ObjCImplementation, /// Code completion occurs within the list of instance variables /// in an Objective-C interface, protocol, category, or implementation. PCC_ObjCInstanceVariableList, /// Code completion occurs following one or more template /// headers. PCC_Template, /// Code completion occurs following one or more template /// headers within a class. PCC_MemberTemplate, /// Code completion occurs within an expression. PCC_Expression, /// Code completion occurs within a statement, which may /// also be an expression or a declaration. PCC_Statement, /// Code completion occurs at the beginning of the /// initialization statement (or expression) in a for loop. PCC_ForInit, /// Code completion occurs within the condition of an if, /// while, switch, or for statement. PCC_Condition, /// Code completion occurs within the body of a function on a /// recovery path, where we do not have a specific handle on our position /// in the grammar. PCC_RecoveryInFunction, /// Code completion occurs where only a type is permitted. PCC_Type, /// Code completion occurs in a parenthesized expression, which /// might also be a type cast. PCC_ParenthesizedExpression, /// Code completion occurs within a sequence of declaration /// specifiers within a function, method, or block. 
PCC_LocalDeclarationSpecifiers }; void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path); void CodeCompleteOrdinaryName(Scope *S, ParserCompletionContext CompletionContext); void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS, bool AllowNonIdentifiers, bool AllowNestedNameSpecifiers); struct CodeCompleteExpressionData; void CodeCompleteExpression(Scope *S, const CodeCompleteExpressionData &Data); void CodeCompleteExpression(Scope *S, QualType PreferredType, bool IsParenthesized = false); void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, Expr *OtherOpBase, SourceLocation OpLoc, bool IsArrow, bool IsBaseExprStatement, QualType PreferredType); void CodeCompletePostfixExpression(Scope *S, ExprResult LHS, QualType PreferredType); void CodeCompleteTag(Scope *S, unsigned TagSpec); void CodeCompleteTypeQualifiers(DeclSpec &DS); void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D, const VirtSpecifiers *VS = nullptr); void CodeCompleteBracketDeclarator(Scope *S); void CodeCompleteCase(Scope *S); enum class AttributeCompletion { Attribute, Scope, None, }; void CodeCompleteAttribute( AttributeCommonInfo::Syntax Syntax, AttributeCompletion Completion = AttributeCompletion::Attribute, const IdentifierInfo *Scope = nullptr); /// Determines the preferred type of the current function argument, by /// examining the signatures of all possible overloads. /// Returns null if unknown or ambiguous, or if code completion is off. /// /// If the code completion point has been reached, also reports the function /// signatures that were considered. /// /// FIXME: rename to GuessCallArgumentType to reduce confusion. QualType ProduceCallSignatureHelp(Scope *S, Expr *Fn, ArrayRef<Expr *> Args, SourceLocation OpenParLoc); QualType ProduceConstructorSignatureHelp(Scope *S, QualType Type, SourceLocation Loc, ArrayRef<Expr *> Args, SourceLocation OpenParLoc); QualType ProduceCtorInitMemberSignatureHelp(Scope *S, Decl *ConstructorDecl, CXXScopeSpec SS, ParsedType TemplateTypeTy, ArrayRef<Expr *> ArgExprs, IdentifierInfo *II, SourceLocation OpenParLoc); void CodeCompleteInitializer(Scope *S, Decl *D); /// Trigger code completion for a record of \p BaseType. \p InitExprs are /// expressions in the initializer list seen so far and \p D is the current /// Designation being parsed. 
void CodeCompleteDesignator(const QualType BaseType, llvm::ArrayRef<Expr *> InitExprs, const Designation &D); void CodeCompleteAfterIf(Scope *S, bool IsBracedThen); void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext, bool IsUsingDeclaration, QualType BaseType, QualType PreferredType); void CodeCompleteUsing(Scope *S); void CodeCompleteUsingDirective(Scope *S); void CodeCompleteNamespaceDecl(Scope *S); void CodeCompleteNamespaceAliasDecl(Scope *S); void CodeCompleteOperatorName(Scope *S); void CodeCompleteConstructorInitializer( Decl *Constructor, ArrayRef<CXXCtorInitializer *> Initializers); void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro, bool AfterAmpersand); void CodeCompleteAfterFunctionEquals(Declarator &D); void CodeCompleteObjCAtDirective(Scope *S); void CodeCompleteObjCAtVisibility(Scope *S); void CodeCompleteObjCAtStatement(Scope *S); void CodeCompleteObjCAtExpression(Scope *S); void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS); void CodeCompleteObjCPropertyGetter(Scope *S); void CodeCompleteObjCPropertySetter(Scope *S); void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS, bool IsParameter); void CodeCompleteObjCMessageReceiver(Scope *S); void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression); void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, bool IsSuper = false); void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, ObjCInterfaceDecl *Super = nullptr); void CodeCompleteObjCForCollection(Scope *S, DeclGroupPtrTy IterationVar); void CodeCompleteObjCSelector(Scope *S, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCProtocolReferences( ArrayRef<IdentifierLocPair> Protocols); void CodeCompleteObjCProtocolDecl(Scope *S); void CodeCompleteObjCInterfaceDecl(Scope *S); void CodeCompleteObjCSuperclass(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationDecl(Scope *S); void CodeCompleteObjCInterfaceCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCPropertyDefinition(Scope *S); void CodeCompleteObjCPropertySynthesizeIvar(Scope *S, IdentifierInfo *PropertyName); void CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod, ParsedType ReturnType); void CodeCompleteObjCMethodDeclSelector(Scope *S, bool IsInstanceMethod, bool AtParameterName, ParsedType ReturnType, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCClassPropertyRefExpr(Scope *S, IdentifierInfo &ClassName, SourceLocation ClassNameLoc, bool IsBaseExprStatement); void CodeCompletePreprocessorDirective(bool InConditional); void CodeCompleteInPreprocessorConditionalExclusion(Scope *S); void CodeCompletePreprocessorMacroName(bool IsDefinition); void CodeCompletePreprocessorExpression(); void CodeCompletePreprocessorMacroArgument(Scope *S, IdentifierInfo *Macro, MacroInfo *MacroInfo, unsigned Argument); void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled); void CodeCompleteNaturalLanguage(); void CodeCompleteAvailabilityPlatformName(); void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator, CodeCompletionTUInfo &CCTUInfo, SmallVectorImpl<CodeCompletionResult> &Results); //@} 
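// A minimal sketch of how a parser might drive one of the completion entry
// points declared above; Tok, Actions, getCurScope and StmtError are
// illustrative stand-ins modeled on typical Clang parser code, not names
// declared in this header.
//
//   if (Tok.is(tok::code_completion)) {
//     // Ask Sema for ordinary-name results in statement context, then
//     // abandon parsing of the current statement.
//     Actions.CodeCompleteOrdinaryName(getCurScope(), Sema::PCC_Statement);
//     return StmtError();
//   }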
//===--------------------------------------------------------------------===// // Extra semantic analysis beyond the C type system public: SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL, unsigned ByteNo) const; private: void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr, const ArraySubscriptExpr *ASE=nullptr, bool AllowOnePastEnd=true, bool IndexNegated=false); void CheckArrayAccess(const Expr *E); // Used to grab the relevant information from a FormatAttr and a // FunctionDeclaration. struct FormatStringInfo { unsigned FormatIdx; unsigned FirstDataArg; bool HasVAListArg; }; static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember, FormatStringInfo *FSI); bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc, ArrayRef<const Expr *> Args); bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto); void CheckConstructorCall(FunctionDecl *FDecl, QualType ThisType, ArrayRef<const Expr *> Args, const FunctionProtoType *Proto, SourceLocation Loc); void CheckArgAlignment(SourceLocation Loc, NamedDecl *FDecl, StringRef ParamName, QualType ArgTy, QualType ParamTy); void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto, const Expr *ThisArg, ArrayRef<const Expr *> Args, bool IsMemberFunction, SourceLocation Loc, SourceRange Range, VariadicCallType CallType); bool CheckObjCString(Expr *Arg); ExprResult CheckOSLogFormatStringArg(Expr *Arg); ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID, CallExpr *TheCall); bool CheckTSBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); void checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD, CallExpr *TheCall); bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall, unsigned MaxWidth); bool CheckNeonBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckMVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckSVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckCDEBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckARMCoprocessorImmediate(const TargetInfo &TI, const Expr *CoprocArg, bool WantCDE); bool CheckARMBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckAArch64BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckBPFBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinCpu(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall); bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinTileArguments(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinTileArgumentsRange(CallExpr *TheCall, ArrayRef<int> ArgNums); bool CheckX86BuiltinTileDuplicate(CallExpr *TheCall, ArrayRef<int> 
ArgNums); bool CheckX86BuiltinTileRangeAndDuplicate(CallExpr *TheCall, ArrayRef<int> ArgNums); bool CheckX86BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckRISCVLMUL(CallExpr *TheCall, unsigned ArgNum); bool CheckRISCVBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call); bool SemaBuiltinUnorderedCompare(CallExpr *TheCall); bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs); bool SemaBuiltinComplex(CallExpr *TheCall); bool SemaBuiltinVSX(CallExpr *TheCall); bool SemaBuiltinOSLogFormat(CallExpr *TheCall); bool SemaValueIsRunOfOnes(CallExpr *TheCall, unsigned ArgNum); public: // Used by C++ template instantiation. ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall); ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo, SourceLocation BuiltinLoc, SourceLocation RParenLoc); private: bool SemaBuiltinPrefetch(CallExpr *TheCall); bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall); bool SemaBuiltinArithmeticFence(CallExpr *TheCall); bool SemaBuiltinAssume(CallExpr *TheCall); bool SemaBuiltinAssumeAligned(CallExpr *TheCall); bool SemaBuiltinLongjmp(CallExpr *TheCall); bool SemaBuiltinSetjmp(CallExpr *TheCall); ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult); ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult); ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult, AtomicExpr::AtomicOp Op); ExprResult SemaBuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult, bool IsDelete); bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum, llvm::APSInt &Result); bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low, int High, bool RangeIsError = true); bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum, unsigned Multiple); bool SemaBuiltinConstantArgPower2(CallExpr *TheCall, int ArgNum); bool SemaBuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum, unsigned ArgBits); bool SemaBuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall, int ArgNum, unsigned ArgBits); bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall, int ArgNum, unsigned ExpectedFieldNum, bool AllowName); bool SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinPPCMMACall(CallExpr *TheCall, unsigned BuiltinID, const char *TypeDesc); bool CheckPPCMMAType(QualType Type, SourceLocation TypeLoc); // Matrix builtin handling. 
ExprResult SemaBuiltinMatrixTranspose(CallExpr *TheCall, ExprResult CallResult); ExprResult SemaBuiltinMatrixColumnMajorLoad(CallExpr *TheCall, ExprResult CallResult); ExprResult SemaBuiltinMatrixColumnMajorStore(CallExpr *TheCall, ExprResult CallResult); public: enum FormatStringType { FST_Scanf, FST_Printf, FST_NSString, FST_Strftime, FST_Strfmon, FST_Kprintf, FST_FreeBSDKPrintf, FST_OSTrace, FST_OSLog, FST_Unknown }; static FormatStringType GetFormatStringType(const FormatAttr *Format); bool FormatStringHasSArg(const StringLiteral *FExpr); static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx); private: bool CheckFormatArguments(const FormatAttr *Format, ArrayRef<const Expr *> Args, bool IsCXXMember, VariadicCallType CallType, SourceLocation Loc, SourceRange Range, llvm::SmallBitVector &CheckedVarArgs); bool CheckFormatArguments(ArrayRef<const Expr *> Args, bool HasVAListArg, unsigned format_idx, unsigned firstDataArg, FormatStringType Type, VariadicCallType CallType, SourceLocation Loc, SourceRange range, llvm::SmallBitVector &CheckedVarArgs); void CheckAbsoluteValueFunction(const CallExpr *Call, const FunctionDecl *FDecl); void CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl); void CheckMemaccessArguments(const CallExpr *Call, unsigned BId, IdentifierInfo *FnName); void CheckStrlcpycatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckStrncatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckFreeArguments(const CallExpr *E); void CheckReturnValExpr(Expr *RetValExp, QualType lhsType, SourceLocation ReturnLoc, bool isObjCMethod = false, const AttrVec *Attrs = nullptr, const FunctionDecl *FD = nullptr); public: void CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS); private: void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation()); void CheckBoolLikeConversion(Expr *E, SourceLocation CC); void CheckForIntOverflow(Expr *E); void CheckUnsequencedOperations(const Expr *E); /// Perform semantic checks on a completed expression. This will either /// be a full-expression or a default argument expression. void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(), bool IsConstexpr = false); void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field, Expr *Init); /// Check if there is a field shadowing. void CheckShadowInheritedFields(const SourceLocation &Loc, DeclarationName FieldName, const CXXRecordDecl *RD, bool DeclIsField = true); /// Check if the given expression contains 'break' or 'continue' /// statement that produces control flow different from GCC. void CheckBreakContinueBinding(Expr *E); /// Check whether receiver is mutable ObjC container which /// attempts to add itself into the container void CheckObjCCircularContainer(ObjCMessageExpr *Message); void CheckTCBEnforcement(const CallExpr *TheCall, const FunctionDecl *Callee); void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE); void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc, bool DeleteWasArrayForm); public: /// Register a magic integral constant to be used as a type tag. 
void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind, uint64_t MagicValue, QualType Type, bool LayoutCompatible, bool MustBeNull); struct TypeTagData { TypeTagData() {} TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) : Type(Type), LayoutCompatible(LayoutCompatible), MustBeNull(MustBeNull) {} QualType Type; /// If true, \c Type should be compared with other expression's types for /// layout-compatibility. unsigned LayoutCompatible : 1; unsigned MustBeNull : 1; }; /// A pair of ArgumentKind identifier and magic value. This uniquely /// identifies the magic value. typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue; private: /// A map from magic value to type information. std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>> TypeTagForDatatypeMagicValues; /// Perform checks on a call of a function with argument_with_type_tag /// or pointer_with_type_tag attributes. void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr, const ArrayRef<const Expr *> ExprArgs, SourceLocation CallSiteLoc); /// Check if we are taking the address of a packed field /// as this may be a problem if the pointer value is dereferenced. void CheckAddressOfPackedMember(Expr *rhs); /// The parser's current scope. /// /// The parser maintains this state here. Scope *CurScope; mutable IdentifierInfo *Ident_super; mutable IdentifierInfo *Ident___float128; /// Nullability type specifiers. IdentifierInfo *Ident__Nonnull = nullptr; IdentifierInfo *Ident__Nullable = nullptr; IdentifierInfo *Ident__Nullable_result = nullptr; IdentifierInfo *Ident__Null_unspecified = nullptr; IdentifierInfo *Ident_NSError = nullptr; /// The handler for the FileChanged preprocessor events. /// /// Used for diagnostics that implement custom semantic analysis for #include /// directives, like -Wpragma-pack. sema::SemaPPCallbacks *SemaPPCallbackHandler; protected: friend class Parser; friend class InitializationSequence; friend class ASTReader; friend class ASTDeclReader; friend class ASTWriter; public: /// Retrieve the keyword associated with the given nullability kind. IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability); /// The struct behind the CFErrorRef pointer. RecordDecl *CFError = nullptr; bool isCFError(RecordDecl *D); /// Retrieve the identifier "NSError". IdentifierInfo *getNSErrorIdent(); /// Retrieve the parser's current scope. /// /// This routine must only be used when it is certain that semantic analysis /// and the parser are in precisely the same context, which is not the case /// when, e.g., we are performing any kind of template instantiation. /// Therefore, the only safe places to use this scope are in the parser /// itself and in routines directly invoked from the parser and *never* from /// template substitution or instantiation. Scope *getCurScope() const { return CurScope; } void incrementMSManglingNumber() const { return CurScope->incrementMSManglingNumber(); } IdentifierInfo *getSuperIdentifier() const; IdentifierInfo *getFloat128Identifier() const; Decl *getObjCDeclContext() const; DeclContext *getCurLexicalContext() const { return OriginalLexicalContext ? OriginalLexicalContext : CurContext; } const DeclContext *getCurObjCLexicalContext() const { const DeclContext *DC = getCurLexicalContext(); // A category implicitly has the attribute of the interface. if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC)) DC = CatD->getClassInterface(); return DC; } /// Determine the number of levels of enclosing template parameters. This is /// only usable while parsing.
Note that this does not include dependent /// contexts in which no template parameters have yet been declared, such as /// in a terse function template or generic lambda before the first 'auto' is /// encountered. unsigned getTemplateDepth(Scope *S) const; /// To be used for checking whether the arguments being passed to a /// function exceed the number of parameters expected for it. static bool TooManyArguments(size_t NumParams, size_t NumArgs, bool PartialOverloading = false) { // We check whether we're just after a comma in code-completion. if (NumArgs > 0 && PartialOverloading) return NumArgs + 1 > NumParams; // If so, we view as an extra argument. return NumArgs > NumParams; } // Emitting members of dllexported classes is delayed until the class // (including field initializers) is fully parsed. SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses; SmallVector<CXXMethodDecl*, 4> DelayedDllExportMemberFunctions; private: int ParsingClassDepth = 0; class SavePendingParsedClassStateRAII { public: SavePendingParsedClassStateRAII(Sema &S) : S(S) { swapSavedState(); } ~SavePendingParsedClassStateRAII() { assert(S.DelayedOverridingExceptionSpecChecks.empty() && "there shouldn't be any pending delayed exception spec checks"); assert(S.DelayedEquivalentExceptionSpecChecks.empty() && "there shouldn't be any pending delayed exception spec checks"); swapSavedState(); } private: Sema &S; decltype(DelayedOverridingExceptionSpecChecks) SavedOverridingExceptionSpecChecks; decltype(DelayedEquivalentExceptionSpecChecks) SavedEquivalentExceptionSpecChecks; void swapSavedState() { SavedOverridingExceptionSpecChecks.swap( S.DelayedOverridingExceptionSpecChecks); SavedEquivalentExceptionSpecChecks.swap( S.DelayedEquivalentExceptionSpecChecks); } }; /// Helper class that collects misaligned member designations and /// their location info for delayed diagnostics. struct MisalignedMember { Expr *E; RecordDecl *RD; ValueDecl *MD; CharUnits Alignment; MisalignedMember() : E(), RD(), MD(), Alignment() {} MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD, CharUnits Alignment) : E(E), RD(RD), MD(MD), Alignment(Alignment) {} explicit MisalignedMember(Expr *E) : MisalignedMember(E, nullptr, nullptr, CharUnits()) {} bool operator==(const MisalignedMember &m) { return this->E == m.E; } }; /// Small set of gathered accesses to potentially misaligned members /// due to the packed attribute. SmallVector<MisalignedMember, 4> MisalignedMembers; /// Adds an expression to the set of gathered misaligned members. void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD, CharUnits Alignment); public: /// Diagnoses the current set of gathered accesses. This typically /// happens at full expression level. The set is cleared after emitting the /// diagnostics. void DiagnoseMisalignedMembers(); /// This function checks if the expression is in the set of potentially /// misaligned members and it is converted to some pointer type T with lower /// or equal alignment requirements. If so it removes it. This is used when /// we do not want to diagnose such misaligned access (e.g. in conversions to /// void*). void DiscardMisalignedMemberAddress(const Type *T, Expr *E); /// This function calls Action when it determines that E designates a /// misaligned member due to the packed attribute. This is used to emit /// local diagnostics like in reference binding.
void RefersToMemberWithReducedAlignment( Expr *E, llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)> Action); /// Describes the reason a calling convention specification was ignored, used /// for diagnostics. enum class CallingConventionIgnoredReason { ForThisTarget = 0, VariadicFunction, ConstructorDestructor, BuiltinFunction }; /// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current /// context is "used as device code". /// /// - If CurLexicalContext is a kernel function or it is known that the /// function will be emitted for the device, emits the diagnostics /// immediately. /// - If CurLexicalContext is a function and we are compiling /// for the device, but we don't know that this function will be codegen'ed /// for device yet, creates a diagnostic which is emitted if and when we /// realize that the function will be codegen'ed. /// /// Example usage: /// /// Diagnose __float128 type usage only from SYCL device code if the current /// target doesn't support it /// if (!S.Context.getTargetInfo().hasFloat128Type() && /// S.getLangOpts().SYCLIsDevice) /// SYCLDiagIfDeviceCode(Loc, diag::err_type_unsupported) << "__float128"; SemaDiagnosticBuilder SYCLDiagIfDeviceCode(SourceLocation Loc, unsigned DiagID); /// Check whether we're allowed to call Callee from the current context. /// /// - If the call is never allowed in a semantically-correct program /// emits an error and returns false. /// /// - If the call is allowed in semantically-correct programs, but only if /// it's never codegen'ed, creates a deferred diagnostic to be emitted if /// and when the caller is codegen'ed, and returns true. /// /// - Otherwise, returns true without emitting any diagnostics. /// /// Adds Callee to DeviceCallGraph if we don't know if its caller will be /// codegen'ed yet. bool checkSYCLDeviceFunction(SourceLocation Loc, FunctionDecl *Callee); }; /// RAII object that enters a new expression evaluation context. class EnterExpressionEvaluationContext { Sema &Actions; bool Entered = true; public: EnterExpressionEvaluationContext( Sema &Actions, Sema::ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr, Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext = Sema::ExpressionEvaluationContextRecord::EK_Other, bool ShouldEnter = true) : Actions(Actions), Entered(ShouldEnter) { if (Entered) Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl, ExprContext); } EnterExpressionEvaluationContext( Sema &Actions, Sema::ExpressionEvaluationContext NewContext, Sema::ReuseLambdaContextDecl_t, Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext = Sema::ExpressionEvaluationContextRecord::EK_Other) : Actions(Actions) { Actions.PushExpressionEvaluationContext( NewContext, Sema::ReuseLambdaContextDecl, ExprContext); } enum InitListTag { InitList }; EnterExpressionEvaluationContext(Sema &Actions, InitListTag, bool ShouldEnter = true) : Actions(Actions), Entered(false) { // In C++11 onwards, narrowing checks are performed on the contents of // braced-init-lists, even when they occur within unevaluated operands. // Therefore we still need to instantiate constexpr functions used in such // a context.
if (ShouldEnter && Actions.isUnevaluatedContext() && Actions.getLangOpts().CPlusPlus11) { Actions.PushExpressionEvaluationContext( Sema::ExpressionEvaluationContext::UnevaluatedList); Entered = true; } } ~EnterExpressionEvaluationContext() { if (Entered) Actions.PopExpressionEvaluationContext(); } }; DeductionFailureInfo MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK, sema::TemplateDeductionInfo &Info); /// Contains a late templated function. /// Will be parsed at the end of the translation unit, used by Sema & Parser. struct LateParsedTemplate { CachedTokens Toks; /// The template function declaration to be late parsed. Decl *D; }; template <> void Sema::PragmaStack<Sema::AlignPackInfo>::Act(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, AlignPackInfo Value); } // end namespace clang namespace llvm { // Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its // SourceLocation. template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> { using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc; using FDBaseInfo = DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>; static FunctionDeclAndLoc getEmptyKey() { return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()}; } static FunctionDeclAndLoc getTombstoneKey() { return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()}; } static unsigned getHashValue(const FunctionDeclAndLoc &FDL) { return hash_combine(FDBaseInfo::getHashValue(FDL.FD), FDL.Loc.getHashValue()); } static bool isEqual(const FunctionDeclAndLoc &LHS, const FunctionDeclAndLoc &RHS) { return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc; } }; } // namespace llvm #endif
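// A minimal usage sketch for the EnterExpressionEvaluationContext RAII class
// above, assuming a Sema instance S and some analysis routine
// analyzeOperand(); both names are illustrative stand-ins only.
//
//   {
//     EnterExpressionEvaluationContext Unevaluated(
//         S, Sema::ExpressionEvaluationContext::Unevaluated);
//     analyzeOperand(); // runs with the unevaluated context pushed
//   }                   // destructor pops the context again
//
// The InitListTag constructor is the special case: it only pushes a context
// when the current context is unevaluated and C++11 narrowing checks apply,
// as the comment in its body explains.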
ft_single.c
/*-------------------------------------------------------------------- NAS Parallel Benchmarks 2.3 OpenMP C versions - FT This benchmark is an OpenMP C version of the NPB FT code. The OpenMP C versions are developed by RWCP and derived from the serial Fortran versions in "NPB 2.3-serial" developed by NAS. Permission to use, copy, distribute and modify this software for any purpose with or without fee is hereby granted. This software is provided "as is" without express or implied warranty. Send comments on the OpenMP C versions to pdp-openmp@rwcp.or.jp Information on OpenMP activities at RWCP is available at: http://pdplab.trc.rwcp.or.jp/pdperf/Omni/ Information on NAS Parallel Benchmarks 2.3 is available at: http://www.nas.nasa.gov/NAS/NPB/ --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- Authors: D. Bailey W. Saphir OpenMP C version: S. Satoh --------------------------------------------------------------------*/ //#include "npb-C.h" /* NAS Parallel Benchmarks 2.3 OpenMP C Versions */ #include <stdio.h> #include <stdlib.h> #include <math.h> #if defined(_OPENMP) #include <omp.h> #endif /* _OPENMP */ typedef int boolean; typedef struct { double real; double imag; } dcomplex; #define TRUE 1 #define FALSE 0 #define max(a,b) (((a) > (b)) ? (a) : (b)) #define min(a,b) (((a) < (b)) ? (a) : (b)) #define pow2(a) ((a)*(a)) #define get_real(c) c.real #define get_imag(c) c.imag #define cadd(c,a,b) (c.real = a.real + b.real, c.imag = a.imag + b.imag) #define csub(c,a,b) (c.real = a.real - b.real, c.imag = a.imag - b.imag) #define cmul(c,a,b) (c.real = a.real * b.real - a.imag * b.imag, \ c.imag = a.real * b.imag + a.imag * b.real) #define crmul(c,a,b) (c.real = a.real * b, c.imag = a.imag * b) extern double randlc(double *, double); extern void vranlc(int, double *, double, double *); extern void timer_clear(int); extern void timer_start(int); extern void timer_stop(int); extern double timer_read(int); extern void c_print_results(char *name, char cclass, int n1, int n2, int n3, int niter, int nthreads, double t, double mops, char *optype, int passed_verification, char *npbversion, char *compiletime, char *cc, char *clink, char *c_lib, char *c_inc, char *cflags, char *clinkflags, char *rand); /* global variables */ //#include "global.h" /******************/ /* default values */ /******************/ #ifndef CLASS #define CLASS 'B' #endif #if CLASS == 'S' /* CLASS = S */ /* c This file is generated automatically by the setparams utility. c It sets the number of processors and the class of the NPB c in this directory. Do not modify it by hand. */ #define NX 64 #define NY 64 #define NZ 64 #define MAXDIM 64 #define NITER_DEFAULT 6 #define NTOTAL 262144 #define CONVERTDOUBLE FALSE #endif #if CLASS == 'W' /* CLASS = W */ /* c This file is generated automatically by the setparams utility. c It sets the number of processors and the class of the NPB c in this directory. Do not modify it by hand. */ #define NX 128 #define NY 128 #define NZ 32 #define MAXDIM 128 #define NITER_DEFAULT 6 #define NTOTAL 524288 #define CONVERTDOUBLE FALSE #endif #if CLASS == 'A' /* CLASS = A */ /* c This file is generated automatically by the setparams utility. c It sets the number of processors and the class of the NPB c in this directory. Do not modify it by hand.
*/ #define NX 256 #define NY 256 #define NZ 128 #define MAXDIM 256 #define NITER_DEFAULT 6 #define NTOTAL 8388608 #define CONVERTDOUBLE FALSE #endif #if CLASS == 'B' /* CLASS = B */ /* c This file is generated automatically by the setparams utility. c It sets the number of processors and the class of the NPB c in this directory. Do not modify it by hand. */ #define NX 512 #define NY 256 #define NZ 256 #define MAXDIM 512 #define NITER_DEFAULT 20 #define NTOTAL 33554432 #define CONVERTDOUBLE FALSE #endif #if CLASS == 'C' /* CLASS = C */ /* c This file is generated automatically by the setparams utility. c It sets the number of processors and the class of the NPB c in this directory. Do not modify it by hand. */ #define NX 512 #define NY 512 #define NZ 512 #define MAXDIM 512 #define NITER_DEFAULT 20 #define NTOTAL 134217728 #define CONVERTDOUBLE FALSE #endif #define COMPILETIME "28 Oct 2014" #define NPBVERSION "2.3" #define CS1 "gcc" #define CS2 "$(CC)" #define CS3 "(none)" #define CS4 "-I../common" #define CS5 "-fopenmp -O2" #define CS6 "-lm -fopenmp" #define CS7 "randdp" /* c If processor array is 1x1 -> 0D grid decomposition c Cache blocking params. These values are good for most c RISC processors. c FFT parameters: c fftblock controls how many ffts are done at a time. c The default is appropriate for most cache-based machines c On vector machines, the FFT can be vectorized with vector c length equal to the block size, so the block size should c be as large as possible. This is the size of the smallest c dimension of the problem: 128 for class A, 256 for class B and c 512 for class C. */ #define FFTBLOCK_DEFAULT 16 #define FFTBLOCKPAD_DEFAULT 18 #define FFTBLOCK FFTBLOCK_DEFAULT #define FFTBLOCKPAD FFTBLOCKPAD_DEFAULT /* COMMON block: blockinfo */ int fftblock; int fftblockpad; /* c we need a bunch of logic to keep track of how c arrays are laid out. c Note: this serial version is derived from the parallel 0D case c of the ft NPB. c The computation proceeds logically as c set up initial conditions c fftx(1) c transpose (1->2) c ffty(2) c transpose (2->3) c fftz(3) c time evolution c fftz(3) c transpose (3->2) c ffty(2) c transpose (2->1) c fftx(1) c compute residual(1) c for the 0D, 1D, 2D strategies, the layouts look like xxx c c 0D 1D 2D c 1: xyz xyz xyz c 2: xyz xyz yxz c 3: xyz zyx zxy c the array dimensions are stored in dims(coord, phase) */ /* COMMON block: layout */ static int dims[3][3]; static int xstart[3]; static int ystart[3]; static int zstart[3]; static int xend[3]; static int yend[3]; static int zend[3]; #define T_TOTAL 0 #define T_SETUP 1 #define T_FFT 2 #define T_EVOLVE 3 #define T_CHECKSUM 4 #define T_FFTLOW 5 #define T_FFTCOPY 6 #define T_MAX 7 #define TIMERS_ENABLED FALSE /* other stuff */ #define SEED 314159265.0 #define A 1220703125.0 #define PI 3.141592653589793238 #define ALPHA 1.0e-6 #define EXPMAX (NITER_DEFAULT*(NX*NX/4+NY*NY/4+NZ*NZ/4)) /* COMMON block: excomm */ static double ex[EXPMAX+1]; /* ex(0:expmax) */ /* c roots of unity array c relies on x being largest dimension?
*/ /* COMMON block: ucomm */ static dcomplex u[NX]; /* for checksum data */ /* COMMON block: sumcomm */ static dcomplex sums[NITER_DEFAULT+1]; /* sums(0:niter_default) */ /* number of iterations*/ /* COMMON block: iter */ static int niter; /* function declarations */ static void evolve(dcomplex u0[NZ][NY][NX], dcomplex u1[NZ][NY][NX], int t, int indexmap[NZ][NY][NX], int d[3]); static void compute_initial_conditions(dcomplex u0[NZ][NY][NX], int d[3]); static void ipow46(double a, int exponent, double *result); static void setup(void); static void compute_indexmap(int indexmap[NZ][NY][NX], int d[3]); static void print_timers(void); static void fft(int dir, dcomplex x1[NZ][NY][NX], dcomplex x2[NZ][NY][NX]); static void cffts1(int is, int d[3], dcomplex x[NZ][NY][NX], dcomplex xout[NZ][NY][NX], dcomplex y0[NX][FFTBLOCKPAD], dcomplex y1[NX][FFTBLOCKPAD]); static void cffts2(int is, int d[3], dcomplex x[NZ][NY][NX], dcomplex xout[NZ][NY][NX], dcomplex y0[NX][FFTBLOCKPAD], dcomplex y1[NX][FFTBLOCKPAD]); static void cffts3(int is, int d[3], dcomplex x[NZ][NY][NX], dcomplex xout[NZ][NY][NX], dcomplex y0[NX][FFTBLOCKPAD], dcomplex y1[NX][FFTBLOCKPAD]); static void fft_init (int n); static void cfftz (int is, int m, int n, dcomplex x[NX][FFTBLOCKPAD], dcomplex y[NX][FFTBLOCKPAD]); static void fftz2 (int is, int l, int m, int n, int ny, int ny1, dcomplex u[NX], dcomplex x[NX][FFTBLOCKPAD], dcomplex y[NX][FFTBLOCKPAD]); static int ilog2(int n); static void checksum(int i, dcomplex u1[NZ][NY][NX], int d[3]); static void verify (int d1, int d2, int d3, int nt, boolean *verified, char *cclass); /*-------------------------------------------------------------------- c FT benchmark c-------------------------------------------------------------------*/ int main(int argc, char **argv) { /*c------------------------------------------------------------------- c-------------------------------------------------------------------*/ int i, ierr; /*------------------------------------------------------------------ c u0, u1, u2 are the main arrays in the problem. c Depending on the decomposition, these arrays will have different c dimensions. To accommodate all possibilities, we allocate them as c one-dimensional arrays and pass them to subroutines for different c views c - u0 contains the (transformed) initial condition c - u1 and u2 are working arrays c - indexmap maps i,j,k of u0 to the correct i^2+j^2+k^2 for the c time evolution operator. c-----------------------------------------------------------------*/ /*-------------------------------------------------------------------- c Large arrays are in common so that they are allocated on the c heap rather than the stack. This common block is not c referenced directly anywhere else. Padding is to avoid accidental c cache problems, since all array sizes are powers of two. c-------------------------------------------------------------------*/ static dcomplex u0[NZ][NY][NX]; static dcomplex pad1[3]; static dcomplex u1[NZ][NY][NX]; static dcomplex pad2[3]; static dcomplex u2[NZ][NY][NX]; static dcomplex pad3[3]; static int indexmap[NZ][NY][NX]; int iter; int nthreads = 1; double total_time, mflops; boolean verified; char cclass; /*-------------------------------------------------------------------- c Run the entire problem once to make sure all data is touched. c This reduces variable startup costs, which is important for such a c short benchmark. The other NPB 2 implementations are similar.
c-------------------------------------------------------------------*/ for (i = 0; i < T_MAX; i++) { timer_clear(i); } setup(); #pragma omp parallel { compute_indexmap(indexmap, dims[2]); #pragma omp single { compute_initial_conditions(u1, dims[0]); fft_init (dims[0][0]); } fft(1, u1, u0); } /* end parallel */ /*-------------------------------------------------------------------- c Start over from the beginning. Note that all operations must c be timed, in contrast to other benchmarks. c-------------------------------------------------------------------*/ for (i = 0; i < T_MAX; i++) { timer_clear(i); } timer_start(T_TOTAL); if (TIMERS_ENABLED == TRUE) timer_start(T_SETUP); #pragma omp parallel private(iter) firstprivate(niter) { compute_indexmap(indexmap, dims[2]); #pragma omp single { compute_initial_conditions(u1, dims[0]); fft_init (dims[0][0]); } if (TIMERS_ENABLED == TRUE) { #pragma omp master timer_stop(T_SETUP); } if (TIMERS_ENABLED == TRUE) { #pragma omp master timer_start(T_FFT); } fft(1, u1, u0); if (TIMERS_ENABLED == TRUE) { #pragma omp master timer_stop(T_FFT); } for (iter = 1; iter <= niter; iter++) { if (TIMERS_ENABLED == TRUE) { #pragma omp master timer_start(T_EVOLVE); } evolve(u0, u1, iter, indexmap, dims[0]); if (TIMERS_ENABLED == TRUE) { #pragma omp master timer_stop(T_EVOLVE); } if (TIMERS_ENABLED == TRUE) { #pragma omp master timer_start(T_FFT); } fft(-1, u1, u2); if (TIMERS_ENABLED == TRUE) { #pragma omp master timer_stop(T_FFT); } if (TIMERS_ENABLED == TRUE) { #pragma omp master timer_start(T_CHECKSUM); } checksum(iter, u2, dims[0]); if (TIMERS_ENABLED == TRUE) { #pragma omp master timer_stop(T_CHECKSUM); } } #pragma omp single verify(NX, NY, NZ, niter, &verified, &cclass); #if defined(_OPENMP) #pragma omp master nthreads = omp_get_num_threads(); #endif /* _OPENMP */ } /* end parallel */ timer_stop(T_TOTAL); total_time = timer_read(T_TOTAL); if( total_time != 0.0) { mflops = 1.0e-6*(double)(NTOTAL) * (14.8157+7.19641*log((double)(NTOTAL)) + (5.23518+7.21113*log((double)(NTOTAL)))*niter) /total_time; } else { mflops = 0.0; } c_print_results("FT", cclass, NX, NY, NZ, niter, nthreads, total_time, mflops, " floating point", verified, NPBVERSION, COMPILETIME, CS1, CS2, CS3, CS4, CS5, CS6, CS7); if (TIMERS_ENABLED == TRUE) print_timers(); } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void evolve(dcomplex u0[NZ][NY][NX], dcomplex u1[NZ][NY][NX], int t, int indexmap[NZ][NY][NX], int d[3]) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c evolve u0 -> u1 (t time steps) in fourier space c-------------------------------------------------------------------*/ int i, j, k; #pragma omp for for (k = 0; k < d[2]; k++) { for (j = 0; j < d[1]; j++) { for (i = 0; i < d[0]; i++) { crmul(u1[k][j][i], u0[k][j][i], ex[t*indexmap[k][j][i]]); } } } } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void compute_initial_conditions(dcomplex u0[NZ][NY][NX], int d[3]) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c Fill in array u0 with initial conditions from c 
random number generator c-------------------------------------------------------------------*/ int k; double x0, start, an, dummy; static double tmp[NX*2*MAXDIM+1]; int i,j,t; start = SEED; /*-------------------------------------------------------------------- c Jump to the starting element for our first plane. c-------------------------------------------------------------------*/ ipow46(A, (zstart[0]-1)*2*NX*NY + (ystart[0]-1)*2*NX, &an); dummy = randlc(&start, an); ipow46(A, 2*NX*NY, &an); /*-------------------------------------------------------------------- c Go through by z planes filling in one square at a time. c-------------------------------------------------------------------*/ for (k = 0; k < dims[0][2]; k++) { x0 = start; vranlc(2*NX*dims[0][1], &x0, A, tmp); t = 1; for (j = 0; j < dims[0][1]; j++) for (i = 0; i < NX; i++) { u0[k][j][i].real = tmp[t++]; u0[k][j][i].imag = tmp[t++]; } if (k != dims[0][2]) dummy = randlc(&start, an); } } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void ipow46(double a, int exponent, double *result) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c compute a^exponent mod 2^46 c-------------------------------------------------------------------*/ double dummy, q, r; int n, n2; /*-------------------------------------------------------------------- c Use c a^n = a^(n/2)*a^(n/2) if n even else c a^n = a*a^(n-1) if n odd c-------------------------------------------------------------------*/ *result = 1; if (exponent == 0) return; q = a; r = 1; n = exponent; while (n > 1) { n2 = n/2; if (n2 * 2 == n) { dummy = randlc(&q, q); n = n2; } else { dummy = randlc(&r, q); n = n-1; } } dummy = randlc(&r, q); *result = r; } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void setup(void) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ int ierr, i, j, fstatus; printf("\n\n NAS Parallel Benchmarks 2.3 OpenMP C version" " - FT Benchmark\n\n"); niter = NITER_DEFAULT; printf(" Size : %3dx%3dx%3d\n", NX, NY, NZ); printf(" Iterations : %7d\n", niter); /* 1004 format(' Number of processes : ', i7) 1005 format(' Processor array : ', i3, 'x', i3) 1006 format(' WARNING: compiled for ', i5, ' processes. ', > ' Will not verify. ')*/ for (i = 0;i < 3 ; i++) { dims[i][0] = NX; dims[i][1] = NY; dims[i][2] = NZ; } for (i = 0; i < 3; i++) { xstart[i] = 1; xend[i] = NX; ystart[i] = 1; yend[i] = NY; zstart[i] = 1; zend[i] = NZ; } /*-------------------------------------------------------------------- c Set up info for blocking of ffts and transposes. This improves c performance on cache-based systems. Blocking involves c working on a chunk of the problem at a time, taking chunks c along the first, second, or third dimension. c c - In cffts1 blocking is on 2nd dimension (with fft on 1st dim) c - In cffts2/3 blocking is on 1st dimension (with fft on 2nd and 3rd dims) c Since 1st dim is always in processor, we'll assume it's long enough c (default blocking factor is 16 so min size for 1st dim is 16) c The only case we have to worry about is cffts1 in a 2d decomposition. 
c so the blocking factor should not be larger than the 2nd dimension. c-------------------------------------------------------------------*/ fftblock = FFTBLOCK_DEFAULT; fftblockpad = FFTBLOCKPAD_DEFAULT; if (fftblock != FFTBLOCK_DEFAULT) fftblockpad = fftblock+3; } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void compute_indexmap(int indexmap[NZ][NY][NX], int d[3]) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c compute function from local (i,j,k) to ibar^2+jbar^2+kbar^2 c for time evolution exponent. c-------------------------------------------------------------------*/ int i, j, k, ii, ii2, jj, ij2, kk; double ap; /*-------------------------------------------------------------------- c basically we want to convert the fortran indices c 1 2 3 4 5 6 7 8 c to c 0 1 2 3 -4 -3 -2 -1 c The following magic formula does the trick: c mod(i-1+n/2, n) - n/2 c-------------------------------------------------------------------*/ #pragma omp for for (i = 0; i < dims[2][0]; i++) { ii = (i+1+xstart[2]-2+NX/2)%NX - NX/2; ii2 = ii*ii; for (j = 0; j < dims[2][1]; j++) { jj = (j+1+ystart[2]-2+NY/2)%NY - NY/2; ij2 = jj*jj+ii2; for (k = 0; k < dims[2][2]; k++) { kk = (k+1+zstart[2]-2+NZ/2)%NZ - NZ/2; indexmap[k][j][i] = kk*kk+ij2; } } } /*-------------------------------------------------------------------- c compute array of exponentials for time evolution. c-------------------------------------------------------------------*/ #pragma omp single { ap = - 4.0 * ALPHA * PI * PI; ex[0] = 1.0; ex[1] = exp(ap); for (i = 2; i <= EXPMAX; i++) { ex[i] = ex[i-1]*ex[1]; } } /* end single */ } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void print_timers(void) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ int i; char *tstrings[] = { " total ", " setup ", " fft ", " evolve ", " checksum ", " fftlow ", " fftcopy " }; for (i = 0; i < T_MAX; i++) { if (timer_read(i) != 0.0) { printf("timer %2d(%16s) :%10.6f\n", i, tstrings[i], timer_read(i)); } } } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void fft(int dir, dcomplex x1[NZ][NY][NX], dcomplex x2[NZ][NY][NX]) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ dcomplex y0[NX][FFTBLOCKPAD]; dcomplex y1[NX][FFTBLOCKPAD]; /*-------------------------------------------------------------------- c note: args x1, x2 must be different arrays c note: args for cfftsx are (direction, layout, xin, xout, scratch) c xin/xout may be the same and it can be somewhat faster c if they are c-------------------------------------------------------------------*/ if (dir == 1) { cffts1(1, dims[0], x1, x1, y0, y1); /* x1 -> x1 */ cffts2(1, dims[1], x1, x1, y0, y1); /* x1 -> x1 */ cffts3(1, dims[2], x1, x2, y0, y1); /* x1 -> x2 */ } else { cffts3(-1, dims[2], x1, x1, y0, y1); /* x1 -> x1 */ cffts2(-1, dims[1], x1, x1, y0, y1); /* x1 -> x1 */ cffts1(-1, dims[0], x1, x2, y0, y1); /* x1 -> x2 */ } }
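/*--------------------------------------------------------------------
c Illustrative sketch (mirroring main() above, not extra benchmark
c code): the calling pattern fft() expects. The first two stages may
c reuse the input array in place; only the last stage needs a distinct
c output array, and the inverse transform runs the stages in reverse
c dimension order:
c
c   fft( 1, u1, u0);                          forward: u1 -> u0
c   evolve(u0, u1, iter, indexmap, dims[0]);  scale in fourier space
c   fft(-1, u1, u2);                          inverse: u1 -> u2
c   checksum(iter, u2, dims[0]);
c-------------------------------------------------------------------*/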
/*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void cffts1(int is, int d[3], dcomplex x[NZ][NY][NX], dcomplex xout[NZ][NY][NX], dcomplex y0[NX][FFTBLOCKPAD], dcomplex y1[NX][FFTBLOCKPAD]) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ int logd[3]; int i, j, k, jj; for (i = 0; i < 3; i++) { logd[i] = ilog2(d[i]); } #pragma omp for for (k = 0; k < d[2]; k++) { for (jj = 0; jj <= d[1] - fftblock; jj+=fftblock) { /* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */ for (j = 0; j < fftblock; j++) { for (i = 0; i < d[0]; i++) { y0[i][j].real = x[k][j+jj][i].real; y0[i][j].imag = x[k][j+jj][i].imag; } } /* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */ /* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTLOW); */ cfftz (is, logd[0], d[0], y0, y1); /* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTLOW); */ /* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */ for (j = 0; j < fftblock; j++) { for (i = 0; i < d[0]; i++) { xout[k][j+jj][i].real = y0[i][j].real; xout[k][j+jj][i].imag = y0[i][j].imag; } } /* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */ } } } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void cffts2(int is, int d[3], dcomplex x[NZ][NY][NX], dcomplex xout[NZ][NY][NX], dcomplex y0[NX][FFTBLOCKPAD], dcomplex y1[NX][FFTBLOCKPAD]) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ int logd[3]; int i, j, k, ii; for (i = 0; i < 3; i++) { logd[i] = ilog2(d[i]); } #pragma omp for for (k = 0; k < d[2]; k++) { for (ii = 0; ii <= d[0] - fftblock; ii+=fftblock) { /* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */ for (j = 0; j < d[1]; j++) { for (i = 0; i < fftblock; i++) { y0[j][i].real = x[k][j][i+ii].real; y0[j][i].imag = x[k][j][i+ii].imag; } } /* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */ /* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTLOW); */ cfftz (is, logd[1], d[1], y0, y1); /* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTLOW); */ /* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */ for (j = 0; j < d[1]; j++) { for (i = 0; i < fftblock; i++) { xout[k][j][i+ii].real = y0[j][i].real; xout[k][j][i+ii].imag = y0[j][i].imag; } } /* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */ } } } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void cffts3(int is, int d[3], dcomplex x[NZ][NY][NX], dcomplex xout[NZ][NY][NX], dcomplex y0[NX][FFTBLOCKPAD], dcomplex y1[NX][FFTBLOCKPAD]) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ int logd[3]; int i, j, k, ii; for (i = 0;i < 3; i++) { logd[i] = ilog2(d[i]); } #pragma omp for for (j = 0; j < d[1]; j++) { for (ii = 0; ii <= d[0] - fftblock; ii+=fftblock) { /* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */ for (k = 0; k < d[2]; k++) { for (i = 0; i < fftblock; i++) { y0[k][i].real = x[k][j][i+ii].real; y0[k][i].imag = x[k][j][i+ii].imag; } } /* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */ /* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTLOW); */ cfftz (is, logd[2], d[2], y0, y1); /* if (TIMERS_ENABLED == TRUE) 
timer_stop(T_FFTLOW); */ /* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */ for (k = 0; k < d[2]; k++) { for (i = 0; i < fftblock; i++) { xout[k][j][i+ii].real = y0[k][i].real; xout[k][j][i+ii].imag = y0[k][i].imag; } } /* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */ } } } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void fft_init (int n) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c compute the roots-of-unity array that will be used for subsequent FFTs. c-------------------------------------------------------------------*/ int m,nu,ku,i,j,ln; double t, ti; /*-------------------------------------------------------------------- c Initialize the U array with sines and cosines in a manner that permits c stride one access at each FFT iteration. c-------------------------------------------------------------------*/ nu = n; m = ilog2(n); u[0].real = (double)m; u[0].imag = 0.0; ku = 1; ln = 1; for (j = 1; j <= m; j++) { t = PI / ln; for (i = 0; i <= ln - 1; i++) { ti = i * t; u[i+ku].real = cos(ti); u[i+ku].imag = sin(ti); } ku = ku + ln; ln = 2 * ln; } } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void cfftz (int is, int m, int n, dcomplex x[NX][FFTBLOCKPAD], dcomplex y[NX][FFTBLOCKPAD]) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c Computes NY N-point complex-to-complex FFTs of X using an algorithm due c to Swarztrauber. X is both the input and the output array, while Y is a c scratch array. It is assumed that N = 2^M. Before calling CFFTZ to c perform FFTs, the array U must be initialized by calling CFFTZ with IS c set to 0 and M set to MX, where MX is the maximum value of M for any c subsequent call. c-------------------------------------------------------------------*/ int i,j,l,mx; /*-------------------------------------------------------------------- c Check if input parameters are invalid. c-------------------------------------------------------------------*/ mx = (int)(u[0].real); if ((is != 1 && is != -1) || m < 1 || m > mx) { printf("CFFTZ: Either U has not been initialized, or else\n" "one of the input parameters is invalid%5d%5d%5d\n", is, m, mx); exit(1); } /*-------------------------------------------------------------------- c Perform one variant of the Stockham FFT. c-------------------------------------------------------------------*/ for (l = 1; l <= m; l+=2) { fftz2 (is, l, m, n, fftblock, fftblockpad, u, x, y); if (l == m) break; fftz2 (is, l + 1, m, n, fftblock, fftblockpad, u, y, x); } /*-------------------------------------------------------------------- c Copy Y to X. 
c-------------------------------------------------------------------*/ if (m % 2 == 1) { for (j = 0; j < n; j++) { for (i = 0; i < fftblock; i++) { x[j][i].real = y[j][i].real; x[j][i].imag = y[j][i].imag; } } } } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void fftz2 (int is, int l, int m, int n, int ny, int ny1, dcomplex u[NX], dcomplex x[NX][FFTBLOCKPAD], dcomplex y[NX][FFTBLOCKPAD]) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c Performs the L-th iteration of the second variant of the Stockham FFT. c-------------------------------------------------------------------*/ int k,n1,li,lj,lk,ku,i,j,i11,i12,i21,i22; dcomplex u1,x11,x21; /*-------------------------------------------------------------------- c Set initial parameters. c-------------------------------------------------------------------*/ n1 = n / 2; if (l-1 == 0) { lk = 1; } else { lk = 2 << ((l - 1)-1); } if (m-l == 0) { li = 1; } else { li = 2 << ((m - l)-1); } lj = 2 * lk; ku = li; for (i = 0; i < li; i++) { i11 = i * lk; i12 = i11 + n1; i21 = i * lj; i22 = i21 + lk; if (is >= 1) { u1.real = u[ku+i].real; u1.imag = u[ku+i].imag; } else { u1.real = u[ku+i].real; u1.imag = -u[ku+i].imag; } /*-------------------------------------------------------------------- c This loop is vectorizable. c-------------------------------------------------------------------*/ for (k = 0; k < lk; k++) { for (j = 0; j < ny; j++) { double x11real, x11imag; double x21real, x21imag; x11real = x[i11+k][j].real; x11imag = x[i11+k][j].imag; x21real = x[i12+k][j].real; x21imag = x[i12+k][j].imag; y[i21+k][j].real = x11real + x21real; y[i21+k][j].imag = x11imag + x21imag; y[i22+k][j].real = u1.real * (x11real - x21real) - u1.imag * (x11imag - x21imag); y[i22+k][j].imag = u1.real * (x11imag - x21imag) + u1.imag * (x11real - x21real); } } } } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static int ilog2(int n) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ int nn, lg; if (n == 1) { return 0; } lg = 1; nn = 2; while (nn < n) { nn = nn << 1; lg++; } return lg; } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void checksum(int i, dcomplex u1[NZ][NY][NX], int d[3]) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ int j, q,r,s, ierr; dcomplex chk,allchk; chk.real = 0.0; chk.imag = 0.0; #pragma omp for nowait for (j = 1; j <= 1024; j++) { q = j%NX+1; if (q >= xstart[0] && q <= xend[0]) { r = (3*j)%NY+1; if (r >= ystart[0] && r <= yend[0]) { s = (5*j)%NZ+1; if (s >= zstart[0] && s <= zend[0]) { cadd(chk,chk,u1[s-zstart[0]][r-ystart[0]][q-xstart[0]]); } } } } #pragma omp critical { sums[i].real += chk.real; sums[i].imag += chk.imag; } #pragma omp barrier #pragma omp single { /* complex % real */ sums[i].real = sums[i].real/(double)(NTOTAL); sums[i].imag = sums[i].imag/(double)(NTOTAL); printf("T = %5d Checksum = %22.12e %22.12e\n", i, sums[i].real, sums[i].imag); } } 
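/*
 * Standalone sketch (not benchmark code) of the buffer ping-pong in cfftz
 * above: fftz2 maps x -> y at odd stages l and y -> x at even stages, so
 * after m stages the result sits in the scratch array y exactly when m is
 * odd -- which is why cfftz copies y back to x only in that case.
 */
#include <stdio.h>

int main(void)
{
    for (int m = 1; m <= 6; m++) {
        printf("m = %d stages -> result in %s\n",
               m, (m % 2 == 1) ? "y (copy back needed)" : "x");
    }
    return 0;
}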
/*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void verify (int d1, int d2, int d3, int nt, boolean *verified, char *cclass) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ int ierr, size, i; double err, epsilon; /*-------------------------------------------------------------------- c Sample size reference checksums c-------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c Class S size reference checksums c-------------------------------------------------------------------*/ double vdata_real_s[6+1] = { 0.0, 5.546087004964e+02, 5.546385409189e+02, 5.546148406171e+02, 5.545423607415e+02, 5.544255039624e+02, 5.542683411902e+02 }; double vdata_imag_s[6+1] = { 0.0, 4.845363331978e+02, 4.865304269511e+02, 4.883910722336e+02, 4.901273169046e+02, 4.917475857993e+02, 4.932597244941e+02 }; /*-------------------------------------------------------------------- c Class W size reference checksums c-------------------------------------------------------------------*/ double vdata_real_w[6+1] = { 0.0, 5.673612178944e+02, 5.631436885271e+02, 5.594024089970e+02, 5.560698047020e+02, 5.530898991250e+02, 5.504159734538e+02 }; double vdata_imag_w[6+1] = { 0.0, 5.293246849175e+02, 5.282149986629e+02, 5.270996558037e+02, 5.260027904925e+02, 5.249400845633e+02, 5.239212247086e+02 }; /*-------------------------------------------------------------------- c Class A size reference checksums c-------------------------------------------------------------------*/ double vdata_real_a[6+1] = { 0.0, 5.046735008193e+02, 5.059412319734e+02, 5.069376896287e+02, 5.077892868474e+02, 5.085233095391e+02, 5.091487099959e+02 }; double vdata_imag_a[6+1] = { 0.0, 5.114047905510e+02, 5.098809666433e+02, 5.098144042213e+02, 5.101336130759e+02, 5.104914655194e+02, 5.107917842803e+02 }; /*-------------------------------------------------------------------- c Class B size reference checksums c-------------------------------------------------------------------*/ double vdata_real_b[20+1] = { 0.0, 5.177643571579e+02, 5.154521291263e+02, 5.146409228649e+02, 5.142378756213e+02, 5.139626667737e+02, 5.137423460082e+02, 5.135547056878e+02, 5.133910925466e+02, 5.132470705390e+02, 5.131197729984e+02, 5.130070319283e+02, 5.129070537032e+02, 5.128182883502e+02, 5.127393733383e+02, 5.126691062020e+02, 5.126064276004e+02, 5.125504076570e+02, 5.125002331720e+02, 5.124551951846e+02, 5.124146770029e+02 }; double vdata_imag_b[20+1] = { 0.0, 5.077803458597e+02, 5.088249431599e+02, 5.096208912659e+02, 5.101023387619e+02, 5.103976610617e+02, 5.105948019802e+02, 5.107404165783e+02, 5.108576573661e+02, 5.109577278523e+02, 5.110460304483e+02, 5.111252433800e+02, 5.111968077718e+02, 5.112616233064e+02, 5.113203605551e+02, 5.113735928093e+02, 5.114218460548e+02, 5.114656139760e+02, 5.115053595966e+02, 5.115415130407e+02, 5.115744692211e+02 }; /*-------------------------------------------------------------------- c Class C size reference checksums c-------------------------------------------------------------------*/ double vdata_real_c[20+1] = { 0.0, 5.195078707457e+02, 5.155422171134e+02, 5.144678022222e+02, 5.140150594328e+02, 5.137550426810e+02, 5.135811056728e+02, 5.134569343165e+02, 5.133651975661e+02, 5.132955192805e+02, 5.132410471738e+02, 5.131971141679e+02, 
	5.131605205716e+02, 5.131290734194e+02, 5.131012720314e+02,
	5.130760908195e+02, 5.130528295923e+02, 5.130310107773e+02,
	5.130103090133e+02, 5.129905029333e+02, 5.129714421109e+02 };
    double vdata_imag_c[20+1] = { 0.0,
	5.149019699238e+02, 5.127578201997e+02, 5.122251847514e+02,
	5.121090289018e+02, 5.121143685824e+02, 5.121496764568e+02,
	5.121870921893e+02, 5.122193250322e+02, 5.122454735794e+02,
	5.122663649603e+02, 5.122830879827e+02, 5.122965869718e+02,
	5.123075927445e+02, 5.123166486553e+02, 5.123241541685e+02,
	5.123304037599e+02, 5.123356167976e+02, 5.123399592211e+02,
	5.123435588985e+02, 5.123465164008e+02 };

    epsilon = 1.0e-12;
    *verified = TRUE;
    *cclass = 'U';

    if (d1 == 64 && d2 == 64 && d3 == 64 && nt == 6) {
	*cclass = 'S';
	for (i = 1; i <= nt; i++) {
	    err = (get_real(sums[i]) - vdata_real_s[i]) / vdata_real_s[i];
	    if (fabs(err) > epsilon) { *verified = FALSE; break; }
	    err = (get_imag(sums[i]) - vdata_imag_s[i]) / vdata_imag_s[i];
	    if (fabs(err) > epsilon) { *verified = FALSE; break; }
	}
    } else if (d1 == 128 && d2 == 128 && d3 == 32 && nt == 6) {
	*cclass = 'W';
	for (i = 1; i <= nt; i++) {
	    err = (get_real(sums[i]) - vdata_real_w[i]) / vdata_real_w[i];
	    if (fabs(err) > epsilon) { *verified = FALSE; break; }
	    err = (get_imag(sums[i]) - vdata_imag_w[i]) / vdata_imag_w[i];
	    if (fabs(err) > epsilon) { *verified = FALSE; break; }
	}
    } else if (d1 == 256 && d2 == 256 && d3 == 128 && nt == 6) {
	*cclass = 'A';
	for (i = 1; i <= nt; i++) {
	    err = (get_real(sums[i]) - vdata_real_a[i]) / vdata_real_a[i];
	    if (fabs(err) > epsilon) { *verified = FALSE; break; }
	    err = (get_imag(sums[i]) - vdata_imag_a[i]) / vdata_imag_a[i];
	    if (fabs(err) > epsilon) { *verified = FALSE; break; }
	}
    } else if (d1 == 512 && d2 == 256 && d3 == 256 && nt == 20) {
	*cclass = 'B';
	for (i = 1; i <= nt; i++) {
	    err = (get_real(sums[i]) - vdata_real_b[i]) / vdata_real_b[i];
	    if (fabs(err) > epsilon) { *verified = FALSE; break; }
	    err = (get_imag(sums[i]) - vdata_imag_b[i]) / vdata_imag_b[i];
	    if (fabs(err) > epsilon) { *verified = FALSE; break; }
	}
    } else if (d1 == 512 && d2 == 512 && d3 == 512 && nt == 20) {
	*cclass = 'C';
	for (i = 1; i <= nt; i++) {
	    err = (get_real(sums[i]) - vdata_real_c[i]) / vdata_real_c[i];
	    if (fabs(err) > epsilon) { *verified = FALSE; break; }
	    err = (get_imag(sums[i]) - vdata_imag_c[i]) / vdata_imag_c[i];
	    if (fabs(err) > epsilon) { *verified = FALSE; break; }
	}
    }

    /* Report success only when the class is known AND every checksum
       comparison passed; the previous test ignored *verified. */
    if (*cclass != 'U' && *verified) {
	printf("Result verification successful\n");
    } else {
	printf("Result verification failed\n");
    }
    printf("cclass = %1c\n", *cclass);
}

/* cat ./common/c_print_results.c */
/*****************************************************************/
/****** C _ P R I N T _ R E S U L T S ******/
/*****************************************************************/
void c_print_results( char *name, char cclass, int n1, int n2, int n3,
    int niter, int nthreads, double t, double mops, char *optype,
    int passed_verification, char *npbversion, char *compiletime,
    char *cc, char *clink, char *c_lib, char *c_inc, char *cflags,
    char *clinkflags, char *rand )
{
    char *evalue="1000";

    printf( "\n\n %s Benchmark Completed\n", name );
    printf( " Class = %c\n", cclass );
    if( n2 == 0 && n3 == 0 )
	printf( " Size = %12d\n", n1 );   /* as in IS */
    else
	printf( " Size = %3dx%3dx%3d\n", n1,n2,n3 );
    printf( " Iterations = %12d\n", niter );
    printf( " Threads = %12d\n", nthreads );
    printf( " Time in seconds = %12.2f\n", t );
    printf( " Mop/s total = %12.2f\n", mops );
    printf( " Operation type = %24s\n", optype);
    if( passed_verification )
	printf( " Verification = SUCCESSFUL\n" );
    else
printf( " Verification = UNSUCCESSFUL\n" ); printf( " Version = %12s\n", npbversion ); printf( " Compile date = %12s\n", compiletime ); printf( "\n Compile options:\n" ); printf( " CC = %s\n", cc ); printf( " CLINK = %s\n", clink ); printf( " C_LIB = %s\n", c_lib ); printf( " C_INC = %s\n", c_inc ); printf( " CFLAGS = %s\n", cflags ); printf( " CLINKFLAGS = %s\n", clinkflags ); printf( " RAND = %s\n", rand ); #ifdef SMP evalue = getenv("MP_SET_NUMTHREADS"); printf( " MULTICPUS = %s\n", evalue ); #endif /* printf( "\n\n" ); printf( " Please send the results of this run to:\n\n" ); printf( " NPB Development Team\n" ); printf( " Internet: npb@nas.nasa.gov\n \n" ); printf( " If email is not available, send this to:\n\n" ); printf( " MS T27A-1\n" ); printf( " NASA Ames Research Center\n" ); printf( " Moffett Field, CA 94035-1000\n\n" ); printf( " Fax: 415-604-3957\n\n" );*/ } /* cat ./common/c_timers.c */ /* #include "wtime.h" #if defined(IBM) #define wtime wtime #elif defined(CRAY) #define wtime WTIME #else #define wtime wtime_ #endif */ /* Prototype */ void wtime( double * ); /*****************************************************************/ /****** E L A P S E D _ T I M E ******/ /*****************************************************************/ double elapsed_time( void ) { double t; wtime( &t ); return( t ); } double start[64], elapsed[64]; /*****************************************************************/ /****** T I M E R _ C L E A R ******/ /*****************************************************************/ void timer_clear( int n ) { elapsed[n] = 0.0; } /*****************************************************************/ /****** T I M E R _ S T A R T ******/ /*****************************************************************/ void timer_start( int n ) { start[n] = elapsed_time(); } /*****************************************************************/ /****** T I M E R _ S T O P ******/ /*****************************************************************/ void timer_stop( int n ) { double t, now; now = elapsed_time(); t = now - start[n]; elapsed[n] += t; } /*****************************************************************/ /****** T I M E R _ R E A D ******/ /*****************************************************************/ double timer_read( int n ) { return( elapsed[n] ); } void wtime(double *t) { static int sec = -1; struct timeval tv; gettimeofday(&tv, (void *)0); // gettimeofday(&tv, (struct timezone *)0); if (sec < 0) sec = tv.tv_sec; *t = (tv.tv_sec - sec) + 1.0e-6*tv.tv_usec; } // common/c_randdp.c /* */ #if defined(USE_POW) #define r23 pow(0.5, 23.0) #define r46 (r23*r23) #define t23 pow(2.0, 23.0) #define t46 (t23*t23) #else #define r23 (0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5) #define r46 (r23*r23) #define t23 (2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0) #define t46 (t23*t23) #endif /*c--------------------------------------------------------------------- c---------------------------------------------------------------------*/ double randlc (double *x, double a) { /*c--------------------------------------------------------------------- c---------------------------------------------------------------------*/ /*c--------------------------------------------------------------------- c c This routine returns a uniform pseudorandom double precision number in the c range (0, 1) by using the linear congruential generator c c x_{k+1} = a x_k (mod 2^46) c c where 0 < x_k < 2^46 and 0 < a < 
2^46. This scheme generates 2^44 numbers c before repeating. The argument A is the same as 'a' in the above formula, c and X is the same as x_0. A and X must be odd double precision integers c in the range (1, 2^46). The returned value RANDLC is normalized to be c between 0 and 1, i.e. RANDLC = 2^(-46) * x_1. X is updated to contain c the new seed x_1, so that subsequent calls to RANDLC using the same c arguments will generate a continuous sequence. c c This routine should produce the same results on any computer with at least c 48 mantissa bits in double precision floating point data. On 64 bit c systems, double precision should be disabled. c c David H. Bailey October 26, 1990 c c---------------------------------------------------------------------*/ double t1,t2,t3,t4,a1,a2,x1,x2,z; /*c--------------------------------------------------------------------- c Break A into two parts such that A = 2^23 * A1 + A2. c---------------------------------------------------------------------*/ t1 = r23 * a; a1 = (int)t1; a2 = a - t23 * a1; /*c--------------------------------------------------------------------- c Break X into two parts such that X = 2^23 * X1 + X2, compute c Z = A1 * X2 + A2 * X1 (mod 2^23), and then c X = 2^23 * Z + A2 * X2 (mod 2^46). c---------------------------------------------------------------------*/ t1 = r23 * (*x); x1 = (int)t1; x2 = (*x) - t23 * x1; t1 = a1 * x2 + a2 * x1; t2 = (int)(r23 * t1); z = t1 - t23 * t2; t3 = t23 * z + a2 * x2; t4 = (int)(r46 * t3); (*x) = t3 - t46 * t4; return (r46 * (*x)); } /*c--------------------------------------------------------------------- c---------------------------------------------------------------------*/ void vranlc (int n, double *x_seed, double a, double* y) { /* void vranlc (int n, double *x_seed, double a, double y[]) { */ /*c--------------------------------------------------------------------- c---------------------------------------------------------------------*/ /*c--------------------------------------------------------------------- c c This routine generates N uniform pseudorandom double precision numbers in c the range (0, 1) by using the linear congruential generator c c x_{k+1} = a x_k (mod 2^46) c c where 0 < x_k < 2^46 and 0 < a < 2^46. This scheme generates 2^44 numbers c before repeating. The argument A is the same as 'a' in the above formula, c and X is the same as x_0. A and X must be odd double precision integers c in the range (1, 2^46). The N results are placed in Y and are normalized c to be between 0 and 1. X is updated to contain the new seed, so that c subsequent calls to VRANLC using the same arguments will generate a c continuous sequence. If N is zero, only initialization is performed, and c the variables X, A and Y are ignored. c c This routine is the standard version designed for scalar or RISC systems. c However, it should produce the same results on any single processor c computer with at least 48 mantissa bits in double precision floating point c data. On 64 bit systems, double precision should be disabled. c c---------------------------------------------------------------------*/ int i; double x,t1,t2,t3,t4,a1,a2,x1,x2,z; /*c--------------------------------------------------------------------- c Break A into two parts such that A = 2^23 * A1 + A2. c---------------------------------------------------------------------*/ t1 = r23 * a; a1 = (int)t1; a2 = a - t23 * a1; x = *x_seed; /*c--------------------------------------------------------------------- c Generate N results. 
This loop is not vectorizable. c---------------------------------------------------------------------*/ for (i = 1; i <= n; i++) { /*c--------------------------------------------------------------------- c Break X into two parts such that X = 2^23 * X1 + X2, compute c Z = A1 * X2 + A2 * X1 (mod 2^23), and then c X = 2^23 * Z + A2 * X2 (mod 2^46). c---------------------------------------------------------------------*/ t1 = r23 * x; x1 = (int)t1; x2 = x - t23 * x1; t1 = a1 * x2 + a2 * x1; t2 = (int)(r23 * t1); z = t1 - t23 * t2; t3 = t23 * z + a2 * x2; t4 = (int)(r46 * t3); x = t3 - t46 * t4; y[i] = r46 * x; } *x_seed = x; }
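/*
 * Minimal integer sketch (not NPB code) of the recurrence that randlc and
 * vranlc implement above with split double-precision arithmetic:
 * x_{k+1} = a * x_k (mod 2^46), normalized by 2^-46. The multiplier
 * 1220703125 (= 5^13) and seed 314159265 are the customary NPB values,
 * used here purely for illustration. Because 2^46 divides 2^64, letting
 * the 64-bit product wrap and then masking the low 46 bits yields the
 * same residue as an exact mod-2^46 computation.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
    const uint64_t mask46 = ((uint64_t)1 << 46) - 1;
    const uint64_t a = 1220703125ull;   /* 5^13, odd, < 2^46 */
    uint64_t x = 314159265ull;          /* odd seed, < 2^46 */
    for (int k = 1; k <= 4; k++) {
        x = (a * x) & mask46;           /* wraps mod 2^64, then mod 2^46 */
        printf("x_%d = %llu  normalized = %.15f\n",
               k, (unsigned long long)x,
               (double)x / (double)((uint64_t)1 << 46));
    }
    return 0;
}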
a.37.1.c
/* { dg-do compile } */ extern int omp_get_num_threads (void); void work (int i); void incorrect () { int np, i; np = omp_get_num_threads (); /* misplaced */ #pragma omp parallel for schedule(static) for (i = 0; i < np; i++) work (i); }
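/* For contrast with the non-conforming test above: a hedged sketch of one
   conforming arrangement, querying the team size inside the parallel region
   so that omp_get_num_threads reports the actual team. The function name
   'correct' is illustrative, not part of the test suite. */
extern int omp_get_num_threads (void);
void work (int i);

void correct (void)
{
  int i, np;
  #pragma omp parallel private(np)
  {
    np = omp_get_num_threads ();	/* valid: called inside the region */
    #pragma omp for schedule(static)
    for (i = 0; i < np; i++)
      work (i);
  }
}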
module_bl_mynn_mym_predict_impl.h
#ifndef __MODULE_BL_MYNN_MYM_PREDICT_IMPL_H__ #define __MODULE_BL_MYNN_MYM_PREDICT_IMPL_H__ // File version granularity. #ifndef MODULE_BL_MYNN_MYM_PREDICT_IMPL_VERSION_MAJOR #define MODULE_BL_MYNN_MYM_PREDICT_IMPL_VERSION_MAJOR 1 #endif #ifndef MODULE_BL_MYNN_MYM_PREDICT_IMPL_VERSION_MINOR #define MODULE_BL_MYNN_MYM_PREDICT_IMPL_VERSION_MINOR 0 #endif #ifndef MODULE_BL_MYNN_MYM_PREDICT_IMPL_PATCH_VERSION #define MODULE_BL_MYNN_MYM_PREDICT_IMPL_PATCH_VERSION 0 #endif #ifndef MODULE_BL_MYNN_MYM_PREDICT_IMPL_CREATE_DATE #define MODULE_BL_MYNN_MYM_PREDICT_IMPL_CREATE_DATE "Date: 11-11-2016 , Time: 15:36 PM GMT+2" #endif // Set this value to successful build date/time. #ifndef MODULE_BL_MYNN_MYM_PREDICT_IMPL_BUILD_DATE #define MODULE_BL_MYNN_MYM_PREDICT_IMPL_BUILD_DATE "" #endif #ifndef MODULE_BL_MYNN_MYM_PREDICT_IMPL_AUTHOR #define MODULE_BL_MYNN_MYM_PREDICT_IMPL_AUTHOR "Name: Bernard Gingold , e-mail: beniekg@gmail.com" #endif #include "module_bl_mynn_F90_iface.h" #include "PhysLib_Config.h" #include "std_headers.h" namespace wrf_phys_wrappers { namespace module_bl_mynn { template<typename R32 = float, typename I32 = int > struct Wrap_Mym_Predict { /****************************************** Constructors and Destructor. ******************************************/ /* @Purpose: Default Constructor - explicitly default. */ Wrap_Mym_Predict() = default; /* @Purpose: 1st 'main' Constructor which purpose is to allocate and initialize scalar and array members. Array members are zero-filled. Caller must later initialize input arrays to correct physical state. */ Wrap_Mym_Predict(_In_ const I32 kts, _In_ const I32 kte, _In_ const I32 levflag, _In_ const I32 bl_mynn_edmf_tke, _In_ const R32 delt, _In_ const R32 flt, _In_ const R32 flq, _In_ const R32 ust, _In_ const R32 pmz, _In_ const R32 phh) : m_kts{ kts }, m_kte{ kte }, m_levflag{ levflag }, m_bl_mynn_edmf_tke{ bl_mynn_edmf_tke }, m_delt{ delt }, m_flt{ flt }, m_flq{ flq }, m_ust{ ust }, m_pmz{ pmz }, m_phh{ phh }, m_dz{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) }, m_dfq{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) }, m_el{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) }, m_pdk{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) }, m_pdt{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) }, m_pdq{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) }, m_pdc{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) }, m_qke{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) }, m_tsq{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) }, m_qsq{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) }, m_cov{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) }, m_s_awqke{ reinterpret_cast<R32*>(_mm_malloc(((m_kte + 1) * sizeof(R32)), align32B)) }, m_s_aw{ reinterpret_cast<R32*>(_mm_malloc(((m_kte + 1) * sizeof(R32)), align32B)) } { if (0 > (m_kte - m_kts)) { std::cerr << "[" << __DATE__ << ":" << __TIME__ << "]" << "FATAL ERROR: Invalid array size 1st Ctor: 'Wrap_Mym_Predict'!!\n"; std::cerr << "at " << __FILE__ << ":" << __LINE__ << "(" << std::hex << "0x" << __FUNCTIONW__ << ")" << "\n"; std::cerr << "***** ERROR-DETAILS ***** \n"; std::cerr << "Lower range value m_kts: " << m_kts << "\n"; std::cerr << "Upper range value m_kte: " << m_kte << "\n"; std::cerr << "Range value difference: " << m_kte - m_kts << "\n"; std::cerr << "Cannot recover, hence on 
first failure occurrence --> calling exit(-1)!!\n"; std::exit(-1); } for (int i{ 0 }; i != this->m_totArrays; ++i) { if ((&this->m_dz)[i] == NULL) { std::cerr << "[" << __DATE__ << ":" << __TIME__ << "]" << "FATAL ERROR: Memory allocation failure in 1st Ctor: 'Wrap_Mym_Predict'!!\n"; std::cerr << "at " << __FILE__ << ":" << __LINE__ << "(" << std::hex << "0x" << __FUNCTIONW__ << ")" << "\n"; std::cerr << "***** ERROR-DETAILS ***** \n"; std::cerr << "Failure detected at index: " << i << " heap address: " << std::hex << "0x" << (&this->m_dz)[i] << "\n"; std::cerr << "Cannot recover, hence on first failure occurrence --> calling exit(-1)!!\n"; std::exit(-1); } } #if defined (USE_ICL_OPENMP) && \ OPENMP_CURR_VER >= 40 #pragma omp parallel for if(m_kte >= (1 << 16)) for (int i = m_kts; i != m_kte; ++i) { m_dz[i] = 0.f; m_dfq[i] = 0.f; m_el[i] = 0.f; m_pdk[i] = 0.f; m_pdt[i] = 0.f; m_pdq[i] = 0.f; m_pdc[i] = 0.f; m_qke[i] = 0.f; m_tsq[i] = 0.f; m_qsq[i] = 0.f; m_cov[i] = 0.f; m_s_awqke[i] = 0.f; m_s_aw[i] = 0.f; } const int top = m_kte + 1; m_s_awqke[top] = 0.f; m_s_aw[top] = 0.f; #else #if defined (USE_AUTO_VECTORIZATION) #pragma ivdep #pragma simd #pragma unroll(UNROLL_4X) #endif for (int i = m_kts; i != m_kte; ++i) { m_dz[i] = 0.f; m_dfq[i] = 0.f; m_el[i] = 0.f; m_pdk[i] = 0.f; m_pdt[i] = 0.f; m_pdq[i] = 0.f; m_pdc[i] = 0.f; m_qke[i] = 0.f; m_tsq[i] = 0.f; m_qsq[i] = 0.f; m_cov[i] = 0.f; m_s_awqke[i] = 0.f; m_s_aw[i] = 0.f; } const int top = m_kte + 1; m_s_awqke[top] = 0.f; m_s_aw[top] = 0.f; #endif } /* @Purpose: 2nd 'main' Constructor which purpose is to allocate and initialize scalar and array members. Array output members are zero-filled. Caller must pass initialized input arrays to correct physical state. */ Wrap_Mym_Predict(_In_ const I32 kts, _In_ const I32 kte, _In_ const I32 levflag, _In_ const I32 bl_mynn_edmf_tke, _In_ const R32 delt, _In_ const R32 flt, _In_ const R32 flq, _In_ const R32 ust, _In_ const R32 pmz, _In_ const R32 phh, _In_ R32* __restrict const dz, _In_ R32* __restrict const dfq, _In_ R32* __restrict const el, _In_ R32* __restrict const pdk, _In_ R32* __restrict const pdt, _In_ R32* __restrict const pdq, _In_ R32* __restrict const pdc, _In_ R32* __restrict const qke, _In_ R32* __restrict const tsq, _In_ R32* __restrict const qsq, _In_ R32* __restrict const cov, _In_ R32* __restrict const s_awqke, _In_ R32* __restrict const s_aw) : m_kts{ kts }, m_kte{ kte }, m_levflag{ levflag }, m_bl_mynn_edmf_tke{ bl_mynn_edmf_tke }, m_delt{ delt }, m_flt{ flt }, m_flq{ flq }, m_ust{ ust }, m_pmz{ pmz }, m_phh{ phh }, m_dz{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) }, m_dfq{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) }, m_el{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) }, m_pdk{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) }, m_pdt{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) }, m_pdq{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) }, m_pdc{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) }, m_qke{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) }, m_tsq{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) }, m_qsq{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) }, m_cov{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) }, m_s_awqke{ reinterpret_cast<R32*>(_mm_malloc(((m_kte + 1) * sizeof(R32)), align32B)) }, m_s_aw{ 
reinterpret_cast<R32*>(_mm_malloc(((m_kte + 1) * sizeof(R32)), align32B)) } { if (0 > (m_kte - m_kts)) { std::cerr << "[" << __DATE__ << ":" << __TIME__ << "]" << "FATAL ERROR: Invalid array size 2nd Ctor: 'Wrap_Mym_Predict'!!\n"; std::cerr << "at " << __FILE__ << ":" << __LINE__ << "(" << std::hex << "0x" << __FUNCTIONW__ << ")" << "\n"; std::cerr << "***** ERROR-DETAILS ***** \n"; std::cerr << "Lower range value m_kts: " << m_kts << "\n"; std::cerr << "Upper range value m_kte: " << m_kte << "\n"; std::cerr << "Range value difference: " << m_kte - m_kts << "\n"; std::cerr << "Cannot recover --> calling exit(-1)!!\n"; std::exit(-1); } for (int i{ 0 }; i != this->m_totArrays; ++i) { if ((&this->m_dz)[i] == NULL) { std::cerr << "[" << __DATE__ << ":" << __TIME__ << "]" << "FATAL ERROR: Memory allocation failure in 2nd Ctor: 'Wrap_Mym_Predict'!!\n"; std::cerr << "at " << __FILE__ << ":" << __LINE__ << "(" << std::hex << "0x" << __FUNCTIONW__ << ")" << "\n"; std::cerr << "***** ERROR-DETAILS ***** \n"; std::cerr << "Failure detected at index: " << i << " heap address: " << std::hex << "0x" << (&this->m_dz)[i] << "\n"; std::cerr << "Cannot recover, hence on first failure occurrence --> calling exit(-1)!!\n"; std::exit(-1); } } if (dz == NULL || dfq == NULL || el == NULL || pdk == NULL || pdt == NULL || pdq == NULL || pdc == NULL || qke == NULL || tsq == NULL || qsq == NULL || cov == NULL || s_awqke == NULL || s_aw == NULL) { std::cerr << "[" << __DATE__ << ":" << __TIME__ << "]" << "FATAL ERROR: Memory allocation failure in 2nd Ctor: 'Wrap_Mym_Predict'!!\n"; std::cerr << "at " << __FILE__ << ":" << __LINE__ << "(" << std::hex << "0x" << __FUNCTIONW__ << ")" << "\n"; std::cerr << "***** ERROR-DETAILS ***** \n"; std::cerr << "One or more caller's arrays contains invalid pointer!!\n"; std::cerr << "Cannot recover, hence on first failure occurrence --> calling exit(-1)!!\n"; std::exit(-1); } #if defined (USE_ICL_OPENMP) && \ OPENMP_CURR_VER >= 40 #pragma omp parallel for if(m_kte >= (1 << 16)) for (int i = m_kts; i != m_kte; ++i) { m_dz[i] = dz[i]; m_dfq[i] = dfq[i]; m_el[i] = el[i]; m_pdk[i] = pdk[i]; m_pdt[i] = pdt[i]; m_pdq[i] = pdq[i]; m_pdc[i] = pdc[i]; m_qke[i] = qke[i]; m_tsq[i] = tsq[i]; m_qsq[i] = qsq[i]; m_cov[i] = cov[i]; m_s_awqke[i] = s_awqke[i]; m_s_aw[i] = s_aw[i]; } const int top = m_kte + 1; m_s_awqke[top] = s_awqke[top]; m_s_aw[top] = s_aw[top]; #else #if defined (USE_AUTO_VECTORIZATION) #pragma ivdep #pragma simd #pragma unroll(UNROLL_4X) #endif for (int i = m_kts; i != m_kte; ++i) { m_dz[i] = dz[i]; m_dfq[i] = dfq[i]; m_el[i] = el[i]; m_pdk[i] = pdk[i]; m_pdt[i] = pdt[i]; m_pdq[i] = pdq[i]; m_pdc[i] = pdc[i]; m_qke[i] = qke[i]; m_tsq[i] = tsq[i]; m_qsq[i] = qsq[i]; m_cov[i] = cov[i]; m_s_awqke[i] = s_awqke[i]; m_s_aw[i] = s_aw[i]; } const int top = m_kte + 1; m_s_awqke[top] = s_awqke[top]; m_s_aw[top] = s_aw[top]; #endif } /* @Purpose: Copy Constructor implements deep copy semantics. 
*/ Wrap_Mym_Predict(_In_ const Wrap_Mym_Predict &x) : m_kts{ x.m_kts }, m_kte{ x.m_kte }, m_levflag{ x.m_levflag }, m_bl_mynn_edmf_tke{ x.m_bl_mynn_edmf_tke }, m_delt{ x.m_delt }, m_flt{ x.m_flt }, m_flq{ x.m_flq }, m_ust{ x.m_ust }, m_pmz{ x.m_pmz }, m_phh{ x.m_phh }, m_dz{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) }, m_dfq{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) }, m_el{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) }, m_pdk{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) }, m_pdt{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) }, m_pdq{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) }, m_pdc{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) }, m_qke{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) }, m_tsq{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) }, m_qsq{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) }, m_cov{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) }, m_s_awqke{ reinterpret_cast<R32*>(_mm_malloc(((m_kte + 1) * sizeof(R32)), align32B)) }, m_s_aw{ reinterpret_cast<R32*>(_mm_malloc(((m_kte + 1) * sizeof(R32)), align32B)) } { for (int i{ 0 }; i != this->m_totArrays; ++i) { if ((&this->m_dz)[i] == NULL) { std::cerr << "[" << __DATE__ << ":" << __TIME__ << "]" << "FATAL ERROR: Memory allocation failure in Copy-Ctor: 'Wrap_Mym_Predict'!!\n"; std::cerr << "at " << __FILE__ << ":" << __LINE__ << "(" << std::hex << "0x" << __FUNCTIONW__ << ")" << "\n"; std::cerr << "***** ERROR-DETAILS ***** \n"; std::cerr << "Failure detected at index: " << i << " heap address: " << std::hex << "0x" << (&this->m_dz)[i] << "\n"; std::cerr << "Cannot recover, hence on first failure occurrence --> calling exit(-1)!!\n"; std::exit(-1); } } #if defined (USE_ICL_OPENMP) && \ OPENMP_CURR_VER >= 40 #pragma omp parallel for if(m_kte >= (1 << 16)) for (int i = m_kts; i != m_kte; ++i) { m_dz[i] = x.m_dz[i]; m_dfq[i] = x.m_dfq[i]; m_el[i] = x.m_el[i]; m_pdk[i] = x.m_pdk[i]; m_pdt[i] = x.m_pdt[i]; m_pdq[i] = x.m_pdq[i]; m_pdc[i] = x.m_pdc[i]; m_qke[i] = x.m_qke[i]; m_tsq[i] = x.m_tsq[i]; m_qsq[i] = x.m_qsq[i]; m_cov[i] = x.m_cov[i]; m_s_awqke[i] = x.m_s_awqke[i]; m_s_aw[i] = x.m_s_aw[i]; } const int top = m_kte + 1; m_s_awqke[top] = x.m_s_awqke[top]; m_s_aw[top] = x.m_s_aw[top]; #else #if defined (USE_AUTO_VECTORIZATION) #pragma ivdep #pragma simd #pragma unroll(UNROLL_4X) #endif for (int i = m_kts; i != m_kte; ++i) { m_dz[i] = x.m_dz[i]; m_dfq[i] = x.m_dfq[i]; m_el[i] = x.m_el[i]; m_pdk[i] = x.m_pdk[i]; m_pdt[i] = x.m_pdt[i]; m_pdq[i] = x.m_pdq[i]; m_pdc[i] = x.m_pdc[i]; m_qke[i] = x.m_qke[i]; m_tsq[i] = x.m_tsq[i]; m_qsq[i] = x.m_qsq[i]; m_cov[i] = x.m_cov[i]; m_s_awqke[i] = x.m_s_awqke[i]; m_s_aw[i] = x.m_s_aw[i]; } const int top = m_kte + 1; m_s_awqke[top] = x.m_s_awqke[top]; m_s_aw[top] = x.m_s_aw[top]; #endif } /* @Purpose: Move Constructor implements shallow copy semantics. */ Wrap_Mym_Predict(_In_ Wrap_Mym_Predict &&x) : m_kts{ x.m_kts }, m_kte{ x.m_kte }, m_levflag{ x.m_levflag }, m_bl_mynn_edmf_tke{ x.m_bl_mynn_edmf_tke }, m_delt{ x.m_delt }, m_flt{ x.m_flt }, m_flq{ x.m_flq }, m_ust{ x.m_ust }, m_pmz{ x.m_pmz }, m_phh{ x.m_phh } { for (int i{ 0 }; i != this->m_totArrays; ++i) { (&this->m_dz)[i] = (&x.m_dz)[i]; } for (int i{ 0 }; i != this->m_totArrays; ++i) { (&x.m_dz)[i] = NULL; } x.m_kts = 0; x.m_kte = 0; } /* @Purpose: Class Destructor. 
*/
	~Wrap_Mym_Predict() {
		for (int i{ 0 }; i != this->m_totArrays; ++i) {
			if ((&this->m_dz)[i]) {
				_mm_free((&this->m_dz)[i]);
			}
		}
		// Null the pointer members themselves; the previous version wrote
		// NULL through m_dz into the already-freed storage.
		for (int i{ 0 }; i != this->m_totArrays; ++i) {
			(&this->m_dz)[i] = NULL;
		}
		m_kts = 0;
		m_kte = 0;
	}

	/*
		@Purpose:
					Copy-assign Operator implements deep copy semantics.
	*/
	Wrap_Mym_Predict & operator=(_In_ const Wrap_Mym_Predict &x) {
		if (this == &x) return (*this);
		m_kts = x.m_kts; m_kte = x.m_kte;
		m_levflag = x.m_levflag; m_bl_mynn_edmf_tke = x.m_bl_mynn_edmf_tke;
		m_delt = x.m_delt; m_flt = x.m_flt; m_flq = x.m_flq;
		m_ust = x.m_ust; m_pmz = x.m_pmz; m_phh = x.m_phh;
		constexpr int ntPtrs1D{ 13 };
		R32 *tPtrs1D[ntPtrs1D] = {};
		// Allocate one extra element (m_kte + 2) so that the two
		// level-interface arrays (indices 11 and 12), which are accessed
		// at [m_kte + 1] throughout this file, stay within the allocation.
		for (int i{ 0 }; i != this->m_totArrays; ++i) {
			tPtrs1D[i] = reinterpret_cast<R32*>(_mm_malloc(((m_kte + 2) * sizeof(R32)), align32B));
		}
		for (int i{ 0 }; i != this->m_totArrays; ++i) {
			if (tPtrs1D[i] == NULL) {
				std::cerr << "[" << __DATE__ << ":" << __TIME__ << "]"
					<< "FATAL ERROR: Memory allocation failure in Copy Operator: 'Wrap_Mym_Predict'!!\n";
				std::cerr << "at " << __FILE__ << ":" << __LINE__ << "(" << std::hex << "0x" << __FUNCTIONW__ << ")" << "\n";
				std::cerr << "***** ERROR-DETAILS ***** \n";
				std::cerr << "Failure detected at index: " << i << " heap address: " << std::hex << "0x" << tPtrs1D[i] << "\n";
				std::cerr << "Cannot recover, hence on first failure occurrence --> calling exit(-1)!!\n";
				std::exit(-1);
			}
		}
#if defined (USE_ICL_OPENMP) && \
	OPENMP_CURR_VER >= 40
#pragma omp parallel for if(m_kte >= (1 << 16))
		for (int idx = 0; idx != this->m_totArrays; ++idx) {
			for (int i = m_kts; i != m_kte; ++i) {
				tPtrs1D[idx][i] = (&x.m_dz)[idx][i];
			}
		}
		const int top = m_kte + 1;
		tPtrs1D[11][top] = x.m_s_awqke[top];
		tPtrs1D[12][top] = x.m_s_aw[top];
		for (int i{ 0 }; i != this->m_totArrays; ++i) {
			_mm_free((&this->m_dz)[i]);
		}
		for (int i{ 0 }; i != this->m_totArrays; ++i) {
			(&this->m_dz)[i] = tPtrs1D[i];
		}
		return (*this);
#else
		for (int idx = 0; idx != this->m_totArrays; ++idx) {
#if defined (USE_AUTO_VECTORIZATION)
#pragma ivdep
#pragma simd
#pragma unroll(UNROLL_4X)
#endif
			for (int i = m_kts; i != m_kte; ++i) {
				tPtrs1D[idx][i] = (&x.m_dz)[idx][i];
			}
		}
		const int top = m_kte + 1;
		tPtrs1D[11][top] = x.m_s_awqke[top];
		tPtrs1D[12][top] = x.m_s_aw[top];
		for (int i{ 0 }; i != this->m_totArrays; ++i) {
			_mm_free((&this->m_dz)[i]);
		}
		for (int i{ 0 }; i != this->m_totArrays; ++i) {
			(&this->m_dz)[i] = tPtrs1D[i];
		}
		return (*this);
#endif
	}

	/*
		@Purpose:
					Move-assign Operator implements shallow copy semantics.
	*/
	Wrap_Mym_Predict & operator=(_In_ Wrap_Mym_Predict &&x) {
		if (this == &x) return (*this);
		m_kts = x.m_kts; m_kte = x.m_kte;
		m_levflag = x.m_levflag; m_bl_mynn_edmf_tke = x.m_bl_mynn_edmf_tke;
		m_delt = x.m_delt; m_flt = x.m_flt; m_flq = x.m_flq;
		m_ust = x.m_ust; m_pmz = x.m_pmz; m_phh = x.m_phh;
		for (int i{ 0 }; i != this->m_totArrays; ++i) {
			_mm_free((&this->m_dz)[i]);
		}
		for (int i{ 0 }; i != this->m_totArrays; ++i) {
			(&this->m_dz)[i] = (&x.m_dz)[i];
		}
		// Null the source's pointer members (not its array contents) so its
		// destructor cannot double-free the transferred buffers.
		for (int i{ 0 }; i != x.m_totArrays; ++i) {
			(&x.m_dz)[i] = NULL;
		}
		return (*this);
	}

	/*
		@Purpose:
					Call Fortran 90 'MYM_PREDICT' subroutine.
	*/
	void Call_Mym_Predict() {
		MODULE_BL_MYNN_mp_MYM_PREDICT(&this->m_kts, &this->m_kte, &this->m_levflag,
			&this->m_delt, &this->m_dz[0], &this->m_ust, &this->m_flt,
			&this->m_flq, &this->m_pmz, &this->m_phh, &this->m_el[0],
			&this->m_dfq[0], &this->m_pdk[0], &this->m_pdt[0], &this->m_pdq[0],
			&this->m_pdc[0], &this->m_qke[0], &this->m_tsq[0], &this->m_qsq[0],
			&this->m_cov[0], &this->m_s_aw[0], &this->m_s_awqke[0],
			&this->m_bl_mynn_edmf_tke);
	}

	/*
		@Purpose:
					Member variables.
	*/
	I32 m_kts;
	I32 m_kte;
	I32 m_levflag;
	I32 m_bl_mynn_edmf_tke;
	R32 m_delt;
	R32 m_flt;
	R32 m_flq;
	R32 m_ust;
	R32 m_pmz;
	R32 m_phh;
	// Input arrays.
	_Field_size_(m_kte)     R32* __restrict m_dz;
	_Field_size_(m_kte)     R32* __restrict m_dfq;
	_Field_size_(m_kte)     R32* __restrict m_el;
	// Input/Output arrays.
	_Field_size_(m_kte)     R32* __restrict m_pdk;
	_Field_size_(m_kte)     R32* __restrict m_pdt;
	_Field_size_(m_kte)     R32* __restrict m_pdq;
	_Field_size_(m_kte)     R32* __restrict m_pdc;
	_Field_size_(m_kte)     R32* __restrict m_qke;
	_Field_size_(m_kte)     R32* __restrict m_tsq;
	_Field_size_(m_kte)     R32* __restrict m_qsq;
	_Field_size_(m_kte)     R32* __restrict m_cov;
	// Level-interface arrays; allocated and indexed up to m_kte + 1.
	_Field_size_(m_kte + 1) R32* __restrict m_s_awqke;
	_Field_size_(m_kte + 1) R32* __restrict m_s_aw;

	static const int m_totArrays = 13;
};

	}
}
#endif /*__MODULE_BL_MYNN_MYM_PREDICT_IMPL_H__*/
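/*
 * Hedged standalone sketch (plain C, not part of the wrapper above) of the
 * allocation discipline the header relies on: _mm_malloc and _mm_free must
 * pair up, failure is reported as NULL, and the 32-byte alignment matches
 * AVX-width loads. The literal 32 stands in for the header's align32B
 * constant; the level count is made up.
 */
#include <stdio.h>
#include <stdint.h>
#include <x86intrin.h>

int main(void)
{
    const int kte = 64;                          /* illustrative level count */
    float *dz = (float *)_mm_malloc((size_t)kte * sizeof(float), 32);
    if (dz == NULL) return 1;                    /* allocation failure */
    printf("aligned to 32 bytes: %s\n",
           ((uintptr_t)dz % 32 == 0) ? "yes" : "no");
    _mm_free(dz);                                /* must pair with _mm_malloc */
    return 0;
}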
oskar_cross_correlate_point_time_smearing_scalar_omp.c
/* * Copyright (c) 2014-2015, The University of Oxford * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. Neither the name of the University of Oxford nor the names of its * contributors may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include <math.h> #include "correlate/private_correlate_functions_inline.h" #include "correlate/oskar_cross_correlate_point_time_smearing_scalar_omp.h" #ifdef __cplusplus extern "C" { #endif /* Single precision. */ void oskar_cross_correlate_point_time_smearing_scalar_omp_f(int num_sources, int num_stations, const float2* jones, const float* source_I, const float* source_l, const float* source_m, const float* source_n, const float* station_u, const float* station_v, const float* station_w, const float* station_x, const float* station_y, float uv_min_lambda, float uv_max_lambda, float inv_wavelength, float frac_bandwidth, float time_int_sec, float gha0_rad, float dec0_rad, float2* vis) { int SQ; /* Loop over stations. */ #pragma omp parallel for private(SQ) schedule(dynamic, 1) for (SQ = 0; SQ < num_stations; ++SQ) { int SP, i; const float2 *station_p, *station_q; /* Pointer to source vector for station q. */ station_q = &jones[SQ * num_sources]; /* Loop over baselines for this station. */ for (SP = SQ + 1; SP < num_stations; ++SP) { float uv_len, uu, vv, ww, uu2, vv2, uuvv, du, dv, dw; float2 sum, guard; sum.x = 0.0f; sum.y = 0.0f; guard.x = 0.0f; guard.y = 0.0f; /* Pointer to source vector for station p. */ station_p = &jones[SP * num_sources]; /* Get common baseline values. */ oskar_evaluate_baseline_terms_inline_f(station_u[SP], station_u[SQ], station_v[SP], station_v[SQ], station_w[SP], station_w[SQ], inv_wavelength, frac_bandwidth, &uv_len, &uu, &vv, &ww, &uu2, &vv2, &uuvv); /* Apply the baseline length filter. */ if (uv_len < uv_min_lambda || uv_len > uv_max_lambda) continue; /* Compute the deltas for time-average smearing. */ oskar_evaluate_baseline_deltas_inline_f(station_x[SP], station_x[SQ], station_y[SP], station_y[SQ], inv_wavelength, time_int_sec, gha0_rad, dec0_rad, &du, &dv, &dw); /* Loop over sources. */ for (i = 0; i < num_sources; ++i) { float l, m, n, r1, r2; /* Get source direction cosines. 
*/ l = source_l[i]; m = source_m[i]; n = source_n[i]; /* Compute bandwidth- and time-smearing terms. */ r1 = oskar_sinc_f(uu * l + vv * m + ww * (n - 1.0f)); r2 = oskar_evaluate_time_smearing_f(du, dv, dw, l, m, n); r1 *= r2; /* Accumulate baseline visibility response for source. */ oskar_accumulate_baseline_visibility_for_source_scalar_inline_f( &sum, i, source_I, station_p, station_q, r1, &guard); } /* Add result to the baseline visibility. */ i = oskar_evaluate_baseline_index_inline(num_stations, SP, SQ); vis[i].x += sum.x; vis[i].y += sum.y; } } } /* Double precision. */ void oskar_cross_correlate_point_time_smearing_scalar_omp_d(int num_sources, int num_stations, const double2* jones, const double* source_I, const double* source_l, const double* source_m, const double* source_n, const double* station_u, const double* station_v, const double* station_w, const double* station_x, const double* station_y, double uv_min_lambda, double uv_max_lambda, double inv_wavelength, double frac_bandwidth, double time_int_sec, double gha0_rad, double dec0_rad, double2* vis) { int SQ; /* Loop over stations. */ #pragma omp parallel for private(SQ) schedule(dynamic, 1) for (SQ = 0; SQ < num_stations; ++SQ) { int SP, i; const double2 *station_p, *station_q; /* Pointer to source vector for station q. */ station_q = &jones[SQ * num_sources]; /* Loop over baselines for this station. */ for (SP = SQ + 1; SP < num_stations; ++SP) { double uv_len, uu, vv, ww, uu2, vv2, uuvv, du, dv, dw; double2 sum; sum.x = 0.0; sum.y = 0.0; /* Pointer to source vector for station p. */ station_p = &jones[SP * num_sources]; /* Get common baseline values. */ oskar_evaluate_baseline_terms_inline_d(station_u[SP], station_u[SQ], station_v[SP], station_v[SQ], station_w[SP], station_w[SQ], inv_wavelength, frac_bandwidth, &uv_len, &uu, &vv, &ww, &uu2, &vv2, &uuvv); /* Apply the baseline length filter. */ if (uv_len < uv_min_lambda || uv_len > uv_max_lambda) continue; /* Compute the deltas for time-average smearing. */ oskar_evaluate_baseline_deltas_inline_d(station_x[SP], station_x[SQ], station_y[SP], station_y[SQ], inv_wavelength, time_int_sec, gha0_rad, dec0_rad, &du, &dv, &dw); /* Loop over sources. */ for (i = 0; i < num_sources; ++i) { double l, m, n, r1, r2; /* Get source direction cosines. */ l = source_l[i]; m = source_m[i]; n = source_n[i]; /* Compute bandwidth- and time-smearing terms. */ r1 = oskar_sinc_d(uu * l + vv * m + ww * (n - 1.0)); r2 = oskar_evaluate_time_smearing_d(du, dv, dw, l, m, n); r1 *= r2; /* Accumulate baseline visibility response for source. */ oskar_accumulate_baseline_visibility_for_source_scalar_inline_d( &sum, i, source_I, station_p, station_q, r1); } /* Add result to the baseline visibility. */ i = oskar_evaluate_baseline_index_inline(num_stations, SP, SQ); vis[i].x += sum.x; vis[i].y += sum.y; } } } #ifdef __cplusplus } #endif
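/*
 * Standalone sketch (not OSKAR code) of the baseline enumeration used by
 * both kernels above: the nested SQ / SP > SQ loops visit each unordered
 * station pair exactly once, i.e. num_stations*(num_stations-1)/2 baselines.
 * Later SQ values have progressively less work, which is why the outer loop
 * carries schedule(dynamic, 1).
 */
#include <stdio.h>

int main(void)
{
    const int num_stations = 6;
    int count = 0;
    for (int SQ = 0; SQ < num_stations; ++SQ)
        for (int SP = SQ + 1; SP < num_stations; ++SP)
            ++count;
    printf("%d stations -> %d baselines (expected %d)\n",
           num_stations, count, num_stations * (num_stations - 1) / 2);
    return 0;
}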
rkb_screen.c
/* Copyright 2014-2018 The PySCF Developers. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. * * Author: Qiming Sun <osirpt.sun@gmail.com> */ #include <stdio.h> #include <stdlib.h> #include <math.h> #include <complex.h> #include <assert.h> #include "cint.h" #include "cvhf.h" #include "optimizer.h" #include "np_helper/np_helper.h" #include "gto/gto.h" #define LL 0 #define SS 1 #define SL 2 #define LS 3 int int2e_spinor(); int int2e_spsp1spsp2_spinor(); int CVHFrkbllll_prescreen(int *shls, CVHFOpt *opt, int *atm, int *bas, double *env) { if (!opt) { return 1; // no screen } int i = shls[0]; int j = shls[1]; int k = shls[2]; int l = shls[3]; int n = opt->nbas; assert(opt->q_cond); assert(opt->dm_cond); assert(i < n); assert(j < n); assert(k < n); assert(l < n); double qijkl = opt->q_cond[i*n+j] * opt->q_cond[k*n+l]; double dmin = opt->direct_scf_cutoff / qijkl; return qijkl > opt->direct_scf_cutoff &&((opt->dm_cond[j*n+i] > dmin) || (opt->dm_cond[l*n+k] > dmin) || (opt->dm_cond[j*n+k] > dmin) || (opt->dm_cond[j*n+l] > dmin) || (opt->dm_cond[i*n+k] > dmin) || (opt->dm_cond[i*n+l] > dmin)); } int CVHFrkbllll_vkscreen(int *shls, CVHFOpt *opt, double **dms_cond, int n_dm, double *dm_atleast, int *atm, int *bas, double *env) { int i = shls[0]; int j = shls[1]; int k = shls[2]; int l = shls[3]; int nbas = opt->nbas; int idm; double qijkl = opt->q_cond[i*nbas+j] * opt->q_cond[k*nbas+l]; double *pdmscond = opt->dm_cond + nbas*nbas; for (idm = 0; idm < (n_dm+1)/2; idm++) { // note in _vhf.rdirect_mapdm, J and K share the same DM dms_cond[idm*2+0] = pdmscond + idm*nbas*nbas; // for vj dms_cond[idm*2+1] = pdmscond + idm*nbas*nbas; // for vk } *dm_atleast = opt->direct_scf_cutoff / qijkl; return 1; } int CVHFrkbssll_prescreen(int *shls, CVHFOpt *opt, int *atm, int *bas, double *env) { if (!opt) { return 1; // no screen } int i = shls[0]; int j = shls[1]; int k = shls[2]; int l = shls[3]; int n = opt->nbas; assert(opt->q_cond); assert(opt->dm_cond); assert(i < n); assert(j < n); assert(k < n); assert(l < n); double *dmsl = opt->dm_cond + n*n*SL; double qijkl = opt->q_cond[n*n*SS+i*n+j] * opt->q_cond[k*n+l]; double dmin = opt->direct_scf_cutoff / qijkl; return qijkl > opt->direct_scf_cutoff &&((opt->dm_cond[n*n*SS+j*n+i] > dmin) || (opt->dm_cond[l*n+k] > dmin) || (dmsl[j*n+k] > dmin) || (dmsl[j*n+l] > dmin) || (dmsl[i*n+k] > dmin) || (dmsl[i*n+l] > dmin)); } // be careful with the order in dms_cond, the current order (dmll, dmss, dmsl) // is consistent to the function _call_veff_ssll in dhf.py int CVHFrkbssll_vkscreen(int *shls, CVHFOpt *opt, double **dms_cond, int n_dm, double *dm_atleast, int *atm, int *bas, double *env) { int i = shls[0]; int j = shls[1]; int k = shls[2]; int l = shls[3]; int nbas = opt->nbas; int idm; double qijkl = opt->q_cond[nbas*nbas*SS+i*nbas+j] * opt->q_cond[k*nbas+l]; double *pdmscond = opt->dm_cond + 4*nbas*nbas; int nset = (n_dm+2) / 3; double *dmscondll = pdmscond + nset*nbas*nbas*LL; double *dmscondss = pdmscond + nset*nbas*nbas*SS; double *dmscondsl = pdmscond + nset*nbas*nbas*SL; for (idm = 
0; idm < nset; idm++) { dms_cond[nset*0+idm] = dmscondll + idm*nbas*nbas; dms_cond[nset*1+idm] = dmscondss + idm*nbas*nbas; dms_cond[nset*2+idm] = dmscondsl + idm*nbas*nbas; } *dm_atleast = opt->direct_scf_cutoff / qijkl; return 1; } static void set_qcond(int (*intor)(), CINTOpt *cintopt, double *qcond, int *ao_loc, int *atm, int natm, int *bas, int nbas, double *env) { int shls_slice[] = {0, nbas}; const int cache_size = GTOmax_cache_size(intor, shls_slice, 1, atm, natm, bas, nbas, env); #pragma omp parallel { double qtmp, tmp; int i, j, ij, di, dj, ish, jsh; int shls[4]; double *cache = malloc(sizeof(double) * cache_size); di = 0; for (ish = 0; ish < nbas; ish++) { dj = ao_loc[ish+1] - ao_loc[ish]; di = MAX(di, dj); } double complex *buf = malloc(sizeof(double complex) * di*di*di*di); #pragma omp for schedule(dynamic, 4) for (ij = 0; ij < nbas*(nbas+1)/2; ij++) { ish = (int)(sqrt(2*ij+.25) - .5 + 1e-7); jsh = ij - ish*(ish+1)/2; di = ao_loc[ish+1] - ao_loc[ish]; dj = ao_loc[jsh+1] - ao_loc[jsh]; shls[0] = ish; shls[1] = jsh; shls[2] = ish; shls[3] = jsh; qtmp = 1e-100; if (0 != (*intor)(buf, NULL, shls, atm, natm, bas, nbas, env, cintopt, cache)) { for (i = 0; i < di; i++) { for (j = 0; j < dj; j++) { tmp = cabs(buf[i+di*j+di*dj*i+di*dj*di*j]); qtmp = MAX(qtmp, tmp); } } qtmp = sqrt(qtmp); } qcond[ish*nbas+jsh] = qtmp; qcond[jsh*nbas+ish] = qtmp; } free(buf); free(cache); } } void CVHFrkbllll_direct_scf(CVHFOpt *opt, int (*intor)(), CINTOpt *cintopt, int *ao_loc, int *atm, int natm, int *bas, int nbas, double *env) { if (opt->q_cond) { free(opt->q_cond); } opt->q_cond = (double *)malloc(sizeof(double) * nbas*nbas); assert(intor == &int2e_spinor); set_qcond(intor, cintopt, opt->q_cond, ao_loc, atm, natm, bas, nbas, env); } void CVHFrkbssss_direct_scf(CVHFOpt *opt, int (*intor)(), CINTOpt *cintopt, int *ao_loc, int *atm, int natm, int *bas, int nbas, double *env) { if (opt->q_cond) { free(opt->q_cond); } opt->q_cond = (double *)malloc(sizeof(double) * nbas*nbas); assert(intor == &int2e_spsp1spsp2_spinor); set_qcond(intor, cintopt, opt->q_cond, ao_loc, atm, natm, bas, nbas, env); } void CVHFrkbssll_direct_scf(CVHFOpt *opt, int (*intor)(), CINTOpt *cintopt, int *ao_loc, int *atm, int natm, int *bas, int nbas, double *env) { if (opt->q_cond) { free(opt->q_cond); } opt->q_cond = (double *)malloc(sizeof(double) * nbas*nbas*2); set_qcond(&int2e_spinor, NULL, opt->q_cond, ao_loc, atm, natm, bas, nbas, env); set_qcond(&int2e_spsp1spsp2_spinor, NULL, opt->q_cond+nbas*nbas, ao_loc, atm, natm, bas, nbas, env); } static void set_dmcond(double *dmcond, double *dmscond, double complex *dm, double direct_scf_cutoff, int nset, int *ao_loc, int *atm, int natm, int *bas, int nbas, double *env) { const size_t nao = ao_loc[nbas]; double dmax, dmaxi, tmp; int i, j, ish, jsh; int iset; double complex *pdm; for (ish = 0; ish < nbas; ish++) { for (jsh = 0; jsh <= ish; jsh++) { dmax = 0; for (iset = 0; iset < nset; iset++) { dmaxi = 0; pdm = dm + nao*nao*iset; for (i = ao_loc[ish]; i < ao_loc[ish+1]; i++) { for (j = ao_loc[jsh]; j < ao_loc[jsh+1]; j++) { tmp = .5 * (cabs(pdm[i*nao+j]) + cabs(pdm[j*nao+i])); dmaxi = MAX(dmaxi, tmp); } } dmscond[iset*nbas*nbas+ish*nbas+jsh] = dmaxi; dmscond[iset*nbas*nbas+jsh*nbas+ish] = dmaxi; dmax = MAX(dmax, dmaxi); } dmcond[ish*nbas+jsh] = dmax; dmcond[jsh*nbas+ish] = dmax; } } } // dm_cond ~ 1+nset, dm_cond + dms_cond void CVHFrkbllll_direct_scf_dm(CVHFOpt *opt, double complex *dm, int nset, int *ao_loc, int *atm, int natm, int *bas, int nbas, double *env) { if (opt->dm_cond) { 
// NOT reuse opt->dm_cond because nset may be diff in different call free(opt->dm_cond); } opt->dm_cond = (double *)malloc(sizeof(double)*nbas*nbas*(1+nset)); NPdset0(opt->dm_cond, ((size_t)nbas)*nbas*(1+nset)); // dmcond followed by dmscond which are max matrix element for each dm set_dmcond(opt->dm_cond, opt->dm_cond+nbas*nbas, dm, opt->direct_scf_cutoff, nset, ao_loc, atm, natm, bas, nbas, env); } void CVHFrkbssss_direct_scf_dm(CVHFOpt *opt, double complex *dm, int nset, int *ao_loc, int *atm, int natm, int *bas, int nbas, double *env) { if (opt->dm_cond) { free(opt->dm_cond); } opt->dm_cond = (double *)malloc(sizeof(double)*nbas*nbas*(1+nset)); NPdset0(opt->dm_cond, ((size_t)nbas)*nbas*(1+nset)); set_dmcond(opt->dm_cond, opt->dm_cond+nbas*nbas, dm, opt->direct_scf_cutoff, nset, ao_loc, atm, natm, bas, nbas, env); } // the current order of dmscond (dmll, dmss, dmsl) is consistent to the // function _call_veff_ssll in dhf.py void CVHFrkbssll_direct_scf_dm(CVHFOpt *opt, double complex *dm, int nset, int *ao_loc, int *atm, int natm, int *bas, int nbas, double *env) { if (opt->dm_cond) { free(opt->dm_cond); } if (nset < 3) { fprintf(stderr, "At least 3 sets of DMs (dmll,dmss,dmsl) are " "required to set rkb prescreening\n"); exit(1); } nset = nset / 3; opt->dm_cond = (double *)malloc(sizeof(double)*nbas*nbas*4*(1+nset)); NPdset0(opt->dm_cond, ((size_t)nbas)*nbas*4*(1+nset)); // 4 types of dmcond (LL,SS,SL,SS) followed by 4 types of dmscond int n2c = CINTtot_cgto_spinor(bas, nbas); double *dmcondll = opt->dm_cond + nbas*nbas*LL; double *dmcondss = opt->dm_cond + nbas*nbas*SS; double *dmcondsl = opt->dm_cond + nbas*nbas*SL; //double *dmcondls = opt->dm_cond + nbas*nbas*LS; double *pdmscond = opt->dm_cond + nbas*nbas*4; double *dmscondll = pdmscond + nset*nbas*nbas*LL; double *dmscondss = pdmscond + nset*nbas*nbas*SS; double *dmscondsl = pdmscond + nset*nbas*nbas*SL; //double *dmscondls = dmscond + nset*nbas*nbas*LS; double complex *dmll = dm + n2c*n2c*LL*nset; double complex *dmss = dm + n2c*n2c*SS*nset; double complex *dmsl = dm + n2c*n2c*SL*nset; //double complex *dmls = dm + n2c*n2c*LS*nset; set_dmcond(dmcondll, dmscondll, dmll, opt->direct_scf_cutoff, nset, ao_loc, atm, natm, bas, nbas, env); set_dmcond(dmcondss, dmscondss, dmss, opt->direct_scf_cutoff, nset, ao_loc, atm, natm, bas, nbas, env); set_dmcond(dmcondsl, dmscondsl, dmsl, opt->direct_scf_cutoff, nset, ao_loc, atm, natm, bas, nbas, env); }
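/*
 * Illustrative sketch (not PySCF code) of the screening test that the
 * prescreen functions above apply: with q_cond holding Schwarz-type bound
 * factors, |(ij|kl)| <= q[ij]*q[kl], a shell quartet is kept only when that
 * bound times the largest relevant density-matrix element can still exceed
 * the drop threshold. The numbers below are made up for demonstration.
 */
#include <stdio.h>

static int keep_quartet(double qij, double qkl, double dmax, double cutoff)
{
    double qijkl = qij * qkl;            /* upper bound on |(ij|kl)| */
    return qijkl > cutoff && dmax > cutoff / qijkl;
}

int main(void)
{
    printf("%d\n", keep_quartet(1e-3, 1e-4, 1.0, 1e-12));   /* 1: kept */
    printf("%d\n", keep_quartet(1e-8, 1e-9, 1.0, 1e-12));   /* 0: screened out */
    return 0;
}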
Parser.h
//===--- Parser.h - C Language Parser ---------------------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file defines the Parser interface. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_PARSE_PARSER_H #define LLVM_CLANG_PARSE_PARSER_H #include "clang/AST/Availability.h" #include "clang/Basic/BitmaskEnum.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/OperatorPrecedence.h" #include "clang/Basic/Specifiers.h" #include "clang/Lex/CodeCompletionHandler.h" #include "clang/Lex/Preprocessor.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/Sema.h" #include "llvm/ADT/SmallVector.h" #include "llvm/Frontend/OpenMP/OMPContext.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/PrettyStackTrace.h" #include "llvm/Support/SaveAndRestore.h" #include <memory> #include <stack> namespace clang { class PragmaHandler; class Scope; class BalancedDelimiterTracker; class CorrectionCandidateCallback; class DeclGroupRef; class DiagnosticBuilder; struct LoopHint; class Parser; class ParsingDeclRAIIObject; class ParsingDeclSpec; class ParsingDeclarator; class ParsingFieldDeclarator; class ColonProtectionRAIIObject; class InMessageExpressionRAIIObject; class PoisonSEHIdentifiersRAIIObject; class OMPClause; class ObjCTypeParamList; struct OMPTraitProperty; struct OMPTraitSelector; struct OMPTraitSet; class OMPTraitInfo; /// Parser - This implements a parser for the C family of languages. After /// parsing units of the grammar, productions are invoked to handle whatever has /// been read. /// class Parser : public CodeCompletionHandler { friend class ColonProtectionRAIIObject; friend class ParsingOpenMPDirectiveRAII; friend class InMessageExpressionRAIIObject; friend class PoisonSEHIdentifiersRAIIObject; friend class ObjCDeclContextSwitch; friend class ParenBraceBracketBalancer; friend class BalancedDelimiterTracker; Preprocessor &PP; /// Tok - The current token we are peeking ahead. All parsing methods assume /// that this is valid. Token Tok; // PrevTokLocation - The location of the token we previously // consumed. This token is used for diagnostics where we expected to // see a token following another token (e.g., the ';' at the end of // a statement). SourceLocation PrevTokLocation; /// Tracks an expected type for the current token when parsing an expression. /// Used by code completion for ranking. PreferredTypeBuilder PreferredType; unsigned short ParenCount = 0, BracketCount = 0, BraceCount = 0; unsigned short MisplacedModuleBeginCount = 0; /// Actions - These are the callbacks we invoke as we parse various constructs /// in the file. Sema &Actions; DiagnosticsEngine &Diags; /// ScopeCache - Cache scopes to reduce malloc traffic. enum { ScopeCacheSize = 16 }; unsigned NumCachedScopes; Scope *ScopeCache[ScopeCacheSize]; /// Identifiers used for SEH handling in Borland. 
These are only /// allowed in particular circumstances // __except block IdentifierInfo *Ident__exception_code, *Ident___exception_code, *Ident_GetExceptionCode; // __except filter expression IdentifierInfo *Ident__exception_info, *Ident___exception_info, *Ident_GetExceptionInfo; // __finally IdentifierInfo *Ident__abnormal_termination, *Ident___abnormal_termination, *Ident_AbnormalTermination; /// Contextual keywords for Microsoft extensions. IdentifierInfo *Ident__except; mutable IdentifierInfo *Ident_sealed; mutable IdentifierInfo *Ident_abstract; /// Ident_super - IdentifierInfo for "super", to support fast /// comparison. IdentifierInfo *Ident_super; /// Ident_vector, Ident_bool, Ident_Bool - cached IdentifierInfos for "vector" /// and "bool" fast comparison. Only present if AltiVec or ZVector are /// enabled. IdentifierInfo *Ident_vector; IdentifierInfo *Ident_bool; IdentifierInfo *Ident_Bool; /// Ident_pixel - cached IdentifierInfos for "pixel" fast comparison. /// Only present if AltiVec enabled. IdentifierInfo *Ident_pixel; /// Objective-C contextual keywords. IdentifierInfo *Ident_instancetype; /// Identifier for "introduced". IdentifierInfo *Ident_introduced; /// Identifier for "deprecated". IdentifierInfo *Ident_deprecated; /// Identifier for "obsoleted". IdentifierInfo *Ident_obsoleted; /// Identifier for "unavailable". IdentifierInfo *Ident_unavailable; /// Identifier for "message". IdentifierInfo *Ident_message; /// Identifier for "strict". IdentifierInfo *Ident_strict; /// Identifier for "replacement". IdentifierInfo *Ident_replacement; /// Identifiers used by the 'external_source_symbol' attribute. IdentifierInfo *Ident_language, *Ident_defined_in, *Ident_generated_declaration; /// C++11 contextual keywords. mutable IdentifierInfo *Ident_final; mutable IdentifierInfo *Ident_GNU_final; mutable IdentifierInfo *Ident_override; // C++2a contextual keywords. mutable IdentifierInfo *Ident_import; mutable IdentifierInfo *Ident_module; // C++ type trait keywords that can be reverted to identifiers and still be // used as type traits. 
llvm::SmallDenseMap<IdentifierInfo *, tok::TokenKind> RevertibleTypeTraits; std::unique_ptr<PragmaHandler> AlignHandler; std::unique_ptr<PragmaHandler> GCCVisibilityHandler; std::unique_ptr<PragmaHandler> OptionsHandler; std::unique_ptr<PragmaHandler> PackHandler; std::unique_ptr<PragmaHandler> MSStructHandler; std::unique_ptr<PragmaHandler> UnusedHandler; std::unique_ptr<PragmaHandler> WeakHandler; std::unique_ptr<PragmaHandler> RedefineExtnameHandler; std::unique_ptr<PragmaHandler> FPContractHandler; std::unique_ptr<PragmaHandler> OpenCLExtensionHandler; std::unique_ptr<PragmaHandler> OpenMPHandler; std::unique_ptr<PragmaHandler> PCSectionHandler; std::unique_ptr<PragmaHandler> MSCommentHandler; std::unique_ptr<PragmaHandler> MSDetectMismatchHandler; std::unique_ptr<PragmaHandler> FPEvalMethodHandler; std::unique_ptr<PragmaHandler> FloatControlHandler; std::unique_ptr<PragmaHandler> MSPointersToMembers; std::unique_ptr<PragmaHandler> MSVtorDisp; std::unique_ptr<PragmaHandler> MSInitSeg; std::unique_ptr<PragmaHandler> MSDataSeg; std::unique_ptr<PragmaHandler> MSBSSSeg; std::unique_ptr<PragmaHandler> MSConstSeg; std::unique_ptr<PragmaHandler> MSCodeSeg; std::unique_ptr<PragmaHandler> MSSection; std::unique_ptr<PragmaHandler> MSRuntimeChecks; std::unique_ptr<PragmaHandler> MSIntrinsic; std::unique_ptr<PragmaHandler> MSOptimize; std::unique_ptr<PragmaHandler> MSFenvAccess; std::unique_ptr<PragmaHandler> CUDAForceHostDeviceHandler; std::unique_ptr<PragmaHandler> OptimizeHandler; std::unique_ptr<PragmaHandler> LoopHintHandler; std::unique_ptr<PragmaHandler> UnrollHintHandler; std::unique_ptr<PragmaHandler> NoUnrollHintHandler; std::unique_ptr<PragmaHandler> UnrollAndJamHintHandler; std::unique_ptr<PragmaHandler> NoUnrollAndJamHintHandler; std::unique_ptr<PragmaHandler> FPHandler; std::unique_ptr<PragmaHandler> STDCFenvAccessHandler; std::unique_ptr<PragmaHandler> STDCFenvRoundHandler; std::unique_ptr<PragmaHandler> STDCCXLIMITHandler; std::unique_ptr<PragmaHandler> STDCUnknownHandler; std::unique_ptr<PragmaHandler> AttributePragmaHandler; std::unique_ptr<PragmaHandler> MaxTokensHerePragmaHandler; std::unique_ptr<PragmaHandler> MaxTokensTotalPragmaHandler; std::unique_ptr<CommentHandler> CommentSemaHandler; /// Whether the '>' token acts as an operator or not. This will be /// true except when we are parsing an expression within a C++ /// template argument list, where the '>' closes the template /// argument list. bool GreaterThanIsOperator; /// ColonIsSacred - When this is false, we aggressively try to recover from /// code like "foo : bar" as if it were a typo for "foo :: bar". This is not /// safe in case statements and a few other things. This is managed by the /// ColonProtectionRAIIObject RAII object. bool ColonIsSacred; /// Parsing OpenMP directive mode. bool OpenMPDirectiveParsing = false; /// When true, we are directly inside an Objective-C message /// send expression. /// /// This is managed by the \c InMessageExpressionRAIIObject class, and /// should not be set directly. bool InMessageExpression; /// Gets set to true after calling ProduceSignatureHelp, it is for a /// workaround to make sure ProduceSignatureHelp is only called at the deepest /// function call. bool CalledSignatureHelp = false; /// The "depth" of the template parameters currently being parsed. unsigned TemplateParameterDepth; /// Current kind of OpenMP clause OpenMPClauseKind OMPClauseKind = llvm::omp::OMPC_unknown; /// RAII class that manages the template parameter depth. 
class TemplateParameterDepthRAII { unsigned &Depth; unsigned AddedLevels; public: explicit TemplateParameterDepthRAII(unsigned &Depth) : Depth(Depth), AddedLevels(0) {} ~TemplateParameterDepthRAII() { Depth -= AddedLevels; } void operator++() { ++Depth; ++AddedLevels; } void addDepth(unsigned D) { Depth += D; AddedLevels += D; } void setAddedDepth(unsigned D) { Depth = Depth - AddedLevels + D; AddedLevels = D; } unsigned getDepth() const { return Depth; } unsigned getOriginalDepth() const { return Depth - AddedLevels; } }; /// Factory object for creating ParsedAttr objects. AttributeFactory AttrFactory; /// Gathers and cleans up TemplateIdAnnotations when parsing of a /// top-level declaration is finished. SmallVector<TemplateIdAnnotation *, 16> TemplateIds; void MaybeDestroyTemplateIds() { if (!TemplateIds.empty() && (Tok.is(tok::eof) || !PP.mightHavePendingAnnotationTokens())) DestroyTemplateIds(); } void DestroyTemplateIds(); /// RAII object to destroy TemplateIdAnnotations where possible, from a /// likely-good position during parsing. struct DestroyTemplateIdAnnotationsRAIIObj { Parser &Self; DestroyTemplateIdAnnotationsRAIIObj(Parser &Self) : Self(Self) {} ~DestroyTemplateIdAnnotationsRAIIObj() { Self.MaybeDestroyTemplateIds(); } }; /// Identifiers which have been declared within a tentative parse. SmallVector<IdentifierInfo *, 8> TentativelyDeclaredIdentifiers; /// Tracker for '<' tokens that might have been intended to be treated as an /// angle bracket instead of a less-than comparison. /// /// This happens when the user intends to form a template-id, but typoes the /// template-name or forgets a 'template' keyword for a dependent template /// name. /// /// We track these locations from the point where we see a '<' with a /// name-like expression on its left until we see a '>' or '>>' that might /// match it. struct AngleBracketTracker { /// Flags used to rank candidate template names when there is more than one /// '<' in a scope. enum Priority : unsigned short { /// A non-dependent name that is a potential typo for a template name. PotentialTypo = 0x0, /// A dependent name that might instantiate to a template-name. DependentName = 0x2, /// A space appears before the '<' token. SpaceBeforeLess = 0x0, /// No space before the '<' token NoSpaceBeforeLess = 0x1, LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue*/ DependentName) }; struct Loc { Expr *TemplateName; SourceLocation LessLoc; AngleBracketTracker::Priority Priority; unsigned short ParenCount, BracketCount, BraceCount; bool isActive(Parser &P) const { return P.ParenCount == ParenCount && P.BracketCount == BracketCount && P.BraceCount == BraceCount; } bool isActiveOrNested(Parser &P) const { return isActive(P) || P.ParenCount > ParenCount || P.BracketCount > BracketCount || P.BraceCount > BraceCount; } }; SmallVector<Loc, 8> Locs; /// Add an expression that might have been intended to be a template name. /// In the case of ambiguity, we arbitrarily select the innermost such /// expression, for example in 'foo < bar < baz', 'bar' is the current /// candidate. No attempt is made to track that 'foo' is also a candidate /// for the case where we see a second suspicious '>' token. 
    void add(Parser &P, Expr *TemplateName, SourceLocation LessLoc,
             Priority Prio) {
      if (!Locs.empty() && Locs.back().isActive(P)) {
        if (Locs.back().Priority <= Prio) {
          Locs.back().TemplateName = TemplateName;
          Locs.back().LessLoc = LessLoc;
          Locs.back().Priority = Prio;
        }
      } else {
        Locs.push_back({TemplateName, LessLoc, Prio, P.ParenCount,
                        P.BracketCount, P.BraceCount});
      }
    }

    /// Mark the current potential missing template location as having been
    /// handled (this happens if we pass a "corresponding" '>' or '>>' token
    /// or leave a bracket scope).
    void clear(Parser &P) {
      while (!Locs.empty() && Locs.back().isActiveOrNested(P))
        Locs.pop_back();
    }

    /// Get the current enclosing expression that might have been intended to
    /// be a template name.
    Loc *getCurrent(Parser &P) {
      if (!Locs.empty() && Locs.back().isActive(P))
        return &Locs.back();
      return nullptr;
    }
  };

  AngleBracketTracker AngleBrackets;

  IdentifierInfo *getSEHExceptKeyword();

  /// True if we are within an Objective-C container while parsing C-like decls.
  ///
  /// This is necessary because Sema thinks we have left the container
  /// to parse the C-like decls, meaning Actions.getObjCDeclContext() will
  /// be NULL.
  bool ParsingInObjCContainer;

  /// Whether to skip parsing of function bodies.
  ///
  /// This option can be used, for example, to speed up searches for
  /// declarations/definitions when indexing.
  bool SkipFunctionBodies;

  /// The location of the expression statement that is being parsed right now.
  /// Used to determine if an expression that is being parsed is a statement or
  /// just a regular sub-expression.
  SourceLocation ExprStatementTokLoc;

  /// Flags describing a context in which we're parsing a statement.
  enum class ParsedStmtContext {
    /// This context permits declarations in language modes where declarations
    /// are not statements.
    AllowDeclarationsInC = 0x1,
    /// This context permits standalone OpenMP directives.
    AllowStandaloneOpenMPDirectives = 0x2,
    /// This context is at the top level of a GNU statement expression.
    InStmtExpr = 0x4,

    /// The context of a regular substatement.
    SubStmt = 0,
    /// The context of a compound-statement.
    Compound = AllowDeclarationsInC | AllowStandaloneOpenMPDirectives,

    LLVM_MARK_AS_BITMASK_ENUM(InStmtExpr)
  };

  /// Act on an expression statement that might be the last statement in a
  /// GNU statement expression. Checks whether we are actually at the end of
  /// a statement expression and builds a suitable expression statement.
  StmtResult handleExprStmt(ExprResult E, ParsedStmtContext StmtCtx);

public:
  Parser(Preprocessor &PP, Sema &Actions, bool SkipFunctionBodies);
  ~Parser() override;

  const LangOptions &getLangOpts() const { return PP.getLangOpts(); }
  const TargetInfo &getTargetInfo() const { return PP.getTargetInfo(); }
  Preprocessor &getPreprocessor() const { return PP; }
  Sema &getActions() const { return Actions; }
  AttributeFactory &getAttrFactory() { return AttrFactory; }

  const Token &getCurToken() const { return Tok; }
  Scope *getCurScope() const { return Actions.getCurScope(); }
  void incrementMSManglingNumber() const {
    return Actions.incrementMSManglingNumber();
  }

  Decl *getObjCDeclContext() const { return Actions.getObjCDeclContext(); }

  // Type forwarding.  All of these are statically 'void*', but they may all be
  // different actual classes based on the actions in place.
  typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
  typedef OpaquePtr<TemplateName> TemplateTy;

  typedef SmallVector<TemplateParameterList *, 4> TemplateParameterLists;

  typedef Sema::FullExprArg FullExprArg;

  // Parsing methods.
/// Initialize - Warm up the parser. /// void Initialize(); /// Parse the first top-level declaration in a translation unit. bool ParseFirstTopLevelDecl(DeclGroupPtrTy &Result, Sema::ModuleImportState &ImportState); /// ParseTopLevelDecl - Parse one top-level declaration. Returns true if /// the EOF was encountered. bool ParseTopLevelDecl(DeclGroupPtrTy &Result, Sema::ModuleImportState &ImportState); bool ParseTopLevelDecl() { DeclGroupPtrTy Result; Sema::ModuleImportState IS = Sema::ModuleImportState::NotACXX20Module; return ParseTopLevelDecl(Result, IS); } /// ConsumeToken - Consume the current 'peek token' and lex the next one. /// This does not work with special tokens: string literals, code completion, /// annotation tokens and balanced tokens must be handled using the specific /// consume methods. /// Returns the location of the consumed token. SourceLocation ConsumeToken() { assert(!isTokenSpecial() && "Should consume special tokens with Consume*Token"); PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } bool TryConsumeToken(tok::TokenKind Expected) { if (Tok.isNot(Expected)) return false; assert(!isTokenSpecial() && "Should consume special tokens with Consume*Token"); PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return true; } bool TryConsumeToken(tok::TokenKind Expected, SourceLocation &Loc) { if (!TryConsumeToken(Expected)) return false; Loc = PrevTokLocation; return true; } /// ConsumeAnyToken - Dispatch to the right Consume* method based on the /// current token type. This should only be used in cases where the type of /// the token really isn't known, e.g. in error recovery. SourceLocation ConsumeAnyToken(bool ConsumeCodeCompletionTok = false) { if (isTokenParen()) return ConsumeParen(); if (isTokenBracket()) return ConsumeBracket(); if (isTokenBrace()) return ConsumeBrace(); if (isTokenStringLiteral()) return ConsumeStringToken(); if (Tok.is(tok::code_completion)) return ConsumeCodeCompletionTok ? ConsumeCodeCompletionToken() : handleUnexpectedCodeCompletionToken(); if (Tok.isAnnotation()) return ConsumeAnnotationToken(); return ConsumeToken(); } SourceLocation getEndOfPreviousToken() { return PP.getLocForEndOfToken(PrevTokLocation); } /// Retrieve the underscored keyword (_Nonnull, _Nullable) that corresponds /// to the given nullability kind. IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability) { return Actions.getNullabilityKeyword(nullability); } private: //===--------------------------------------------------------------------===// // Low-Level token peeking and consumption methods. // /// isTokenParen - Return true if the cur token is '(' or ')'. bool isTokenParen() const { return Tok.isOneOf(tok::l_paren, tok::r_paren); } /// isTokenBracket - Return true if the cur token is '[' or ']'. bool isTokenBracket() const { return Tok.isOneOf(tok::l_square, tok::r_square); } /// isTokenBrace - Return true if the cur token is '{' or '}'. bool isTokenBrace() const { return Tok.isOneOf(tok::l_brace, tok::r_brace); } /// isTokenStringLiteral - True if this token is a string-literal. bool isTokenStringLiteral() const { return tok::isStringLiteral(Tok.getKind()); } /// isTokenSpecial - True if this token requires special consumption methods. bool isTokenSpecial() const { return isTokenStringLiteral() || isTokenParen() || isTokenBracket() || isTokenBrace() || Tok.is(tok::code_completion) || Tok.isAnnotation(); } /// Returns true if the current token is '=' or is a type of '='. 
  /// For typos, give a fixit to '='.
  bool isTokenEqualOrEqualTypo();

  /// Return the current token to the token stream and make the given
  /// token the current token.
  void UnconsumeToken(Token &Consumed) {
    Token Next = Tok;
    PP.EnterToken(Consumed, /*IsReinject*/true);
    PP.Lex(Tok);
    PP.EnterToken(Next, /*IsReinject*/true);
  }

  SourceLocation ConsumeAnnotationToken() {
    assert(Tok.isAnnotation() && "wrong consume method");
    SourceLocation Loc = Tok.getLocation();
    PrevTokLocation = Tok.getAnnotationEndLoc();
    PP.Lex(Tok);
    return Loc;
  }

  /// ConsumeParen - This consume method keeps the paren count up-to-date.
  ///
  SourceLocation ConsumeParen() {
    assert(isTokenParen() && "wrong consume method");
    if (Tok.getKind() == tok::l_paren)
      ++ParenCount;
    else if (ParenCount) {
      AngleBrackets.clear(*this);
      --ParenCount;       // Don't let unbalanced )'s drive the count negative.
    }
    PrevTokLocation = Tok.getLocation();
    PP.Lex(Tok);
    return PrevTokLocation;
  }

  /// ConsumeBracket - This consume method keeps the bracket count up-to-date.
  ///
  SourceLocation ConsumeBracket() {
    assert(isTokenBracket() && "wrong consume method");
    if (Tok.getKind() == tok::l_square)
      ++BracketCount;
    else if (BracketCount) {
      AngleBrackets.clear(*this);
      --BracketCount;     // Don't let unbalanced ]'s drive the count negative.
    }
    PrevTokLocation = Tok.getLocation();
    PP.Lex(Tok);
    return PrevTokLocation;
  }

  /// ConsumeBrace - This consume method keeps the brace count up-to-date.
  ///
  SourceLocation ConsumeBrace() {
    assert(isTokenBrace() && "wrong consume method");
    if (Tok.getKind() == tok::l_brace)
      ++BraceCount;
    else if (BraceCount) {
      AngleBrackets.clear(*this);
      --BraceCount;       // Don't let unbalanced }'s drive the count negative.
    }
    PrevTokLocation = Tok.getLocation();
    PP.Lex(Tok);
    return PrevTokLocation;
  }

  /// ConsumeStringToken - Consume the current 'peek token', lexing a new one
  /// and returning the token kind.  This method is specific to strings, as it
  /// handles string literal concatenation, as per C99 5.1.1.2, translation
  /// phase #6.
  SourceLocation ConsumeStringToken() {
    assert(isTokenStringLiteral() &&
           "Should only consume string literals with this method");
    PrevTokLocation = Tok.getLocation();
    PP.Lex(Tok);
    return PrevTokLocation;
  }

  /// Consume the current code-completion token.
  ///
  /// This routine can be called to consume the code-completion token and
  /// continue processing in special cases where \c cutOffParsing() isn't
  /// desired, such as token caching or completion with lookahead.
  SourceLocation ConsumeCodeCompletionToken() {
    assert(Tok.is(tok::code_completion));
    PrevTokLocation = Tok.getLocation();
    PP.Lex(Tok);
    return PrevTokLocation;
  }

  /// \brief When we are consuming a code-completion token without having
  /// matched a specific position in the grammar, provide code-completion
  /// results based on context.
  ///
  /// \returns the source location of the code-completion token.
  SourceLocation handleUnexpectedCodeCompletionToken();

  /// Abruptly cut off parsing; mainly used when we have reached the
  /// code-completion point.
  void cutOffParsing() {
    if (PP.isCodeCompletionEnabled())
      PP.setCodeCompletionReached();
    // Cut off parsing by acting as if we reached the end-of-file.
    Tok.setKind(tok::eof);
  }

  /// Determine if we're at the end of the file or at a transition
  /// between modules.
  bool isEofOrEom() {
    tok::TokenKind Kind = Tok.getKind();
    return Kind == tok::eof || Kind == tok::annot_module_begin ||
           Kind == tok::annot_module_end || Kind == tok::annot_module_include;
  }

  /// Checks if the \p Level is valid for use in a fold expression.
bool isFoldOperator(prec::Level Level) const; /// Checks if the \p Kind is a valid operator for fold expressions. bool isFoldOperator(tok::TokenKind Kind) const; /// Initialize all pragma handlers. void initializePragmaHandlers(); /// Destroy and reset all pragma handlers. void resetPragmaHandlers(); /// Handle the annotation token produced for #pragma unused(...) void HandlePragmaUnused(); /// Handle the annotation token produced for /// #pragma GCC visibility... void HandlePragmaVisibility(); /// Handle the annotation token produced for /// #pragma pack... void HandlePragmaPack(); /// Handle the annotation token produced for /// #pragma ms_struct... void HandlePragmaMSStruct(); void HandlePragmaMSPointersToMembers(); void HandlePragmaMSVtorDisp(); void HandlePragmaMSPragma(); bool HandlePragmaMSSection(StringRef PragmaName, SourceLocation PragmaLocation); bool HandlePragmaMSSegment(StringRef PragmaName, SourceLocation PragmaLocation); bool HandlePragmaMSInitSeg(StringRef PragmaName, SourceLocation PragmaLocation); /// Handle the annotation token produced for /// #pragma align... void HandlePragmaAlign(); /// Handle the annotation token produced for /// #pragma clang __debug dump... void HandlePragmaDump(); /// Handle the annotation token produced for /// #pragma weak id... void HandlePragmaWeak(); /// Handle the annotation token produced for /// #pragma weak id = id... void HandlePragmaWeakAlias(); /// Handle the annotation token produced for /// #pragma redefine_extname... void HandlePragmaRedefineExtname(); /// Handle the annotation token produced for /// #pragma STDC FP_CONTRACT... void HandlePragmaFPContract(); /// Handle the annotation token produced for /// #pragma STDC FENV_ACCESS... void HandlePragmaFEnvAccess(); /// Handle the annotation token produced for /// #pragma STDC FENV_ROUND... void HandlePragmaFEnvRound(); /// Handle the annotation token produced for /// #pragma float_control void HandlePragmaFloatControl(); /// \brief Handle the annotation token produced for /// #pragma clang fp ... void HandlePragmaFP(); /// Handle the annotation token produced for /// #pragma OPENCL EXTENSION... void HandlePragmaOpenCLExtension(); /// Handle the annotation token produced for /// #pragma clang __debug captured StmtResult HandlePragmaCaptured(); /// Handle the annotation token produced for /// #pragma clang loop and #pragma unroll. bool HandlePragmaLoopHint(LoopHint &Hint); bool ParsePragmaAttributeSubjectMatchRuleSet( attr::ParsedSubjectMatchRuleSet &SubjectMatchRules, SourceLocation &AnyLoc, SourceLocation &LastMatchRuleEndLoc); void HandlePragmaAttribute(); /// GetLookAheadToken - This peeks ahead N tokens and returns that token /// without consuming any tokens. LookAhead(0) returns 'Tok', LookAhead(1) /// returns the token after Tok, etc. /// /// Note that this differs from the Preprocessor's LookAhead method, because /// the Parser always has one token lexed that the preprocessor doesn't. /// const Token &GetLookAheadToken(unsigned N) { if (N == 0 || Tok.is(tok::eof)) return Tok; return PP.LookAhead(N-1); } public: /// NextToken - This peeks ahead one token and returns it without /// consuming it. const Token &NextToken() { return PP.LookAhead(0); } /// getTypeAnnotation - Read a parsed type out of an annotation token. 
static TypeResult getTypeAnnotation(const Token &Tok) { if (!Tok.getAnnotationValue()) return TypeError(); return ParsedType::getFromOpaquePtr(Tok.getAnnotationValue()); } private: static void setTypeAnnotation(Token &Tok, TypeResult T) { assert((T.isInvalid() || T.get()) && "produced a valid-but-null type annotation?"); Tok.setAnnotationValue(T.isInvalid() ? nullptr : T.get().getAsOpaquePtr()); } static NamedDecl *getNonTypeAnnotation(const Token &Tok) { return static_cast<NamedDecl*>(Tok.getAnnotationValue()); } static void setNonTypeAnnotation(Token &Tok, NamedDecl *ND) { Tok.setAnnotationValue(ND); } static IdentifierInfo *getIdentifierAnnotation(const Token &Tok) { return static_cast<IdentifierInfo*>(Tok.getAnnotationValue()); } static void setIdentifierAnnotation(Token &Tok, IdentifierInfo *ND) { Tok.setAnnotationValue(ND); } /// Read an already-translated primary expression out of an annotation /// token. static ExprResult getExprAnnotation(const Token &Tok) { return ExprResult::getFromOpaquePointer(Tok.getAnnotationValue()); } /// Set the primary expression corresponding to the given annotation /// token. static void setExprAnnotation(Token &Tok, ExprResult ER) { Tok.setAnnotationValue(ER.getAsOpaquePointer()); } public: // If NeedType is true, then TryAnnotateTypeOrScopeToken will try harder to // find a type name by attempting typo correction. bool TryAnnotateTypeOrScopeToken(); bool TryAnnotateTypeOrScopeTokenAfterScopeSpec(CXXScopeSpec &SS, bool IsNewScope); bool TryAnnotateCXXScopeToken(bool EnteringContext = false); bool MightBeCXXScopeToken() { return Tok.is(tok::identifier) || Tok.is(tok::coloncolon) || (Tok.is(tok::annot_template_id) && NextToken().is(tok::coloncolon)) || Tok.is(tok::kw_decltype) || Tok.is(tok::kw___super); } bool TryAnnotateOptionalCXXScopeToken(bool EnteringContext = false) { return MightBeCXXScopeToken() && TryAnnotateCXXScopeToken(EnteringContext); } private: enum AnnotatedNameKind { /// Annotation has failed and emitted an error. ANK_Error, /// The identifier is a tentatively-declared name. ANK_TentativeDecl, /// The identifier is a template name. FIXME: Add an annotation for that. ANK_TemplateName, /// The identifier can't be resolved. ANK_Unresolved, /// Annotation was successful. ANK_Success }; AnnotatedNameKind TryAnnotateName(CorrectionCandidateCallback *CCC = nullptr); /// Push a tok::annot_cxxscope token onto the token stream. void AnnotateScopeToken(CXXScopeSpec &SS, bool IsNewAnnotation); /// TryAltiVecToken - Check for context-sensitive AltiVec identifier tokens, /// replacing them with the non-context-sensitive keywords. This returns /// true if the token was replaced. bool TryAltiVecToken(DeclSpec &DS, SourceLocation Loc, const char *&PrevSpec, unsigned &DiagID, bool &isInvalid) { if (!getLangOpts().AltiVec && !getLangOpts().ZVector) return false; if (Tok.getIdentifierInfo() != Ident_vector && Tok.getIdentifierInfo() != Ident_bool && Tok.getIdentifierInfo() != Ident_Bool && (!getLangOpts().AltiVec || Tok.getIdentifierInfo() != Ident_pixel)) return false; return TryAltiVecTokenOutOfLine(DS, Loc, PrevSpec, DiagID, isInvalid); } /// TryAltiVecVectorToken - Check for context-sensitive AltiVec vector /// identifier token, replacing it with the non-context-sensitive __vector. /// This returns true if the token was replaced. 
bool TryAltiVecVectorToken() { if ((!getLangOpts().AltiVec && !getLangOpts().ZVector) || Tok.getIdentifierInfo() != Ident_vector) return false; return TryAltiVecVectorTokenOutOfLine(); } bool TryAltiVecVectorTokenOutOfLine(); bool TryAltiVecTokenOutOfLine(DeclSpec &DS, SourceLocation Loc, const char *&PrevSpec, unsigned &DiagID, bool &isInvalid); /// Returns true if the current token is the identifier 'instancetype'. /// /// Should only be used in Objective-C language modes. bool isObjCInstancetype() { assert(getLangOpts().ObjC); if (Tok.isAnnotation()) return false; if (!Ident_instancetype) Ident_instancetype = PP.getIdentifierInfo("instancetype"); return Tok.getIdentifierInfo() == Ident_instancetype; } /// TryKeywordIdentFallback - For compatibility with system headers using /// keywords as identifiers, attempt to convert the current token to an /// identifier and optionally disable the keyword for the remainder of the /// translation unit. This returns false if the token was not replaced, /// otherwise emits a diagnostic and returns true. bool TryKeywordIdentFallback(bool DisableKeyword); /// Get the TemplateIdAnnotation from the token. TemplateIdAnnotation *takeTemplateIdAnnotation(const Token &tok); /// TentativeParsingAction - An object that is used as a kind of "tentative /// parsing transaction". It gets instantiated to mark the token position and /// after the token consumption is done, Commit() or Revert() is called to /// either "commit the consumed tokens" or revert to the previously marked /// token position. Example: /// /// TentativeParsingAction TPA(*this); /// ConsumeToken(); /// .... /// TPA.Revert(); /// class TentativeParsingAction { Parser &P; PreferredTypeBuilder PrevPreferredType; Token PrevTok; size_t PrevTentativelyDeclaredIdentifierCount; unsigned short PrevParenCount, PrevBracketCount, PrevBraceCount; bool isActive; public: explicit TentativeParsingAction(Parser &p) : P(p), PrevPreferredType(P.PreferredType) { PrevTok = P.Tok; PrevTentativelyDeclaredIdentifierCount = P.TentativelyDeclaredIdentifiers.size(); PrevParenCount = P.ParenCount; PrevBracketCount = P.BracketCount; PrevBraceCount = P.BraceCount; P.PP.EnableBacktrackAtThisPos(); isActive = true; } void Commit() { assert(isActive && "Parsing action was finished!"); P.TentativelyDeclaredIdentifiers.resize( PrevTentativelyDeclaredIdentifierCount); P.PP.CommitBacktrackedTokens(); isActive = false; } void Revert() { assert(isActive && "Parsing action was finished!"); P.PP.Backtrack(); P.PreferredType = PrevPreferredType; P.Tok = PrevTok; P.TentativelyDeclaredIdentifiers.resize( PrevTentativelyDeclaredIdentifierCount); P.ParenCount = PrevParenCount; P.BracketCount = PrevBracketCount; P.BraceCount = PrevBraceCount; isActive = false; } ~TentativeParsingAction() { assert(!isActive && "Forgot to call Commit or Revert!"); } }; /// A TentativeParsingAction that automatically reverts in its destructor. /// Useful for disambiguation parses that will always be reverted. class RevertingTentativeParsingAction : private Parser::TentativeParsingAction { public: RevertingTentativeParsingAction(Parser &P) : Parser::TentativeParsingAction(P) {} ~RevertingTentativeParsingAction() { Revert(); } }; class UnannotatedTentativeParsingAction; /// ObjCDeclContextSwitch - An object used to switch context from /// an objective-c decl context to its enclosing decl context and /// back. 
class ObjCDeclContextSwitch { Parser &P; Decl *DC; SaveAndRestore<bool> WithinObjCContainer; public: explicit ObjCDeclContextSwitch(Parser &p) : P(p), DC(p.getObjCDeclContext()), WithinObjCContainer(P.ParsingInObjCContainer, DC != nullptr) { if (DC) P.Actions.ActOnObjCTemporaryExitContainerContext(cast<DeclContext>(DC)); } ~ObjCDeclContextSwitch() { if (DC) P.Actions.ActOnObjCReenterContainerContext(cast<DeclContext>(DC)); } }; /// ExpectAndConsume - The parser expects that 'ExpectedTok' is next in the /// input. If so, it is consumed and false is returned. /// /// If a trivial punctuator misspelling is encountered, a FixIt error /// diagnostic is issued and false is returned after recovery. /// /// If the input is malformed, this emits the specified diagnostic and true is /// returned. bool ExpectAndConsume(tok::TokenKind ExpectedTok, unsigned Diag = diag::err_expected, StringRef DiagMsg = ""); /// The parser expects a semicolon and, if present, will consume it. /// /// If the next token is not a semicolon, this emits the specified diagnostic, /// or, if there's just some closing-delimiter noise (e.g., ')' or ']') prior /// to the semicolon, consumes that extra token. bool ExpectAndConsumeSemi(unsigned DiagID); /// The kind of extra semi diagnostic to emit. enum ExtraSemiKind { OutsideFunction = 0, InsideStruct = 1, InstanceVariableList = 2, AfterMemberFunctionDefinition = 3 }; /// Consume any extra semi-colons until the end of the line. void ConsumeExtraSemi(ExtraSemiKind Kind, DeclSpec::TST T = TST_unspecified); /// Return false if the next token is an identifier. An 'expected identifier' /// error is emitted otherwise. /// /// The parser tries to recover from the error by checking if the next token /// is a C++ keyword when parsing Objective-C++. Return false if the recovery /// was successful. bool expectIdentifier(); /// Kinds of compound pseudo-tokens formed by a sequence of two real tokens. enum class CompoundToken { /// A '(' '{' beginning a statement-expression. StmtExprBegin, /// A '}' ')' ending a statement-expression. StmtExprEnd, /// A '[' '[' beginning a C++11 or C2x attribute. AttrBegin, /// A ']' ']' ending a C++11 or C2x attribute. AttrEnd, /// A '::' '*' forming a C++ pointer-to-member declaration. MemberPtr, }; /// Check that a compound operator was written in a "sensible" way, and warn /// if not. void checkCompoundToken(SourceLocation FirstTokLoc, tok::TokenKind FirstTokKind, CompoundToken Op); public: //===--------------------------------------------------------------------===// // Scope manipulation /// ParseScope - Introduces a new scope for parsing. The kind of /// scope is determined by ScopeFlags. Objects of this type should /// be created on the stack to coincide with the position where the /// parser enters the new scope, and this object's constructor will /// create that new scope. Similarly, once the object is destroyed /// the parser will exit the scope. class ParseScope { Parser *Self; ParseScope(const ParseScope &) = delete; void operator=(const ParseScope &) = delete; public: // ParseScope - Construct a new object to manage a scope in the // parser Self where the new Scope is created with the flags // ScopeFlags, but only when we aren't about to enter a compound statement. 
ParseScope(Parser *Self, unsigned ScopeFlags, bool EnteredScope = true, bool BeforeCompoundStmt = false) : Self(Self) { if (EnteredScope && !BeforeCompoundStmt) Self->EnterScope(ScopeFlags); else { if (BeforeCompoundStmt) Self->incrementMSManglingNumber(); this->Self = nullptr; } } // Exit - Exit the scope associated with this object now, rather // than waiting until the object is destroyed. void Exit() { if (Self) { Self->ExitScope(); Self = nullptr; } } ~ParseScope() { Exit(); } }; /// Introduces zero or more scopes for parsing. The scopes will all be exited /// when the object is destroyed. class MultiParseScope { Parser &Self; unsigned NumScopes = 0; MultiParseScope(const MultiParseScope&) = delete; public: MultiParseScope(Parser &Self) : Self(Self) {} void Enter(unsigned ScopeFlags) { Self.EnterScope(ScopeFlags); ++NumScopes; } void Exit() { while (NumScopes) { Self.ExitScope(); --NumScopes; } } ~MultiParseScope() { Exit(); } }; /// EnterScope - Start a new scope. void EnterScope(unsigned ScopeFlags); /// ExitScope - Pop a scope off the scope stack. void ExitScope(); /// Re-enter the template scopes for a declaration that might be a template. unsigned ReenterTemplateScopes(MultiParseScope &S, Decl *D); private: /// RAII object used to modify the scope flags for the current scope. class ParseScopeFlags { Scope *CurScope; unsigned OldFlags; ParseScopeFlags(const ParseScopeFlags &) = delete; void operator=(const ParseScopeFlags &) = delete; public: ParseScopeFlags(Parser *Self, unsigned ScopeFlags, bool ManageFlags = true); ~ParseScopeFlags(); }; //===--------------------------------------------------------------------===// // Diagnostic Emission and Error recovery. public: DiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID); DiagnosticBuilder Diag(const Token &Tok, unsigned DiagID); DiagnosticBuilder Diag(unsigned DiagID) { return Diag(Tok, DiagID); } private: void SuggestParentheses(SourceLocation Loc, unsigned DK, SourceRange ParenRange); void CheckNestedObjCContexts(SourceLocation AtLoc); public: /// Control flags for SkipUntil functions. enum SkipUntilFlags { StopAtSemi = 1 << 0, ///< Stop skipping at semicolon /// Stop skipping at specified token, but don't skip the token itself StopBeforeMatch = 1 << 1, StopAtCodeCompletion = 1 << 2 ///< Stop at code completion }; friend constexpr SkipUntilFlags operator|(SkipUntilFlags L, SkipUntilFlags R) { return static_cast<SkipUntilFlags>(static_cast<unsigned>(L) | static_cast<unsigned>(R)); } /// SkipUntil - Read tokens until we get to the specified token, then consume /// it (unless StopBeforeMatch is specified). Because we cannot guarantee /// that the token will ever occur, this skips to the next token, or to some /// likely good stopping point. If Flags has StopAtSemi flag, skipping will /// stop at a ';' character. Balances (), [], and {} delimiter tokens while /// skipping. /// /// If SkipUntil finds the specified token, it returns true, otherwise it /// returns false. 
  bool SkipUntil(tok::TokenKind T,
                 SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
    return SkipUntil(llvm::makeArrayRef(T), Flags);
  }
  bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2,
                 SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
    tok::TokenKind TokArray[] = {T1, T2};
    return SkipUntil(TokArray, Flags);
  }
  bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2, tok::TokenKind T3,
                 SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
    tok::TokenKind TokArray[] = {T1, T2, T3};
    return SkipUntil(TokArray, Flags);
  }
  bool SkipUntil(ArrayRef<tok::TokenKind> Toks,
                 SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0));

  /// SkipMalformedDecl - Read tokens until we get to some likely good stopping
  /// point for skipping past a simple-declaration.
  void SkipMalformedDecl();

  /// The location of the first statement inside an else that might
  /// have a misleading indentation. If there is no
  /// MisleadingIndentationChecker on an else active, this location is invalid.
  SourceLocation MisleadingIndentationElseLoc;

private:
  //===--------------------------------------------------------------------===//
  // Lexing and parsing of C++ inline methods.

  struct ParsingClass;

  /// [class.mem]p1: "... the class is regarded as complete within
  /// - function bodies
  /// - default arguments
  /// - exception-specifications (TODO: C++0x)
  /// - and brace-or-equal-initializers for non-static data members
  /// (including such things in nested classes)."
  /// LateParsedDeclarations build the tree of those elements so they can
  /// be parsed after parsing the top-level class.
  class LateParsedDeclaration {
  public:
    virtual ~LateParsedDeclaration();

    virtual void ParseLexedMethodDeclarations();
    virtual void ParseLexedMemberInitializers();
    virtual void ParseLexedMethodDefs();
    virtual void ParseLexedAttributes();
    virtual void ParseLexedPragmas();
  };

  /// Inner node of the LateParsedDeclaration tree that parses
  /// all its members recursively.
  class LateParsedClass : public LateParsedDeclaration {
  public:
    LateParsedClass(Parser *P, ParsingClass *C);
    ~LateParsedClass() override;

    void ParseLexedMethodDeclarations() override;
    void ParseLexedMemberInitializers() override;
    void ParseLexedMethodDefs() override;
    void ParseLexedAttributes() override;
    void ParseLexedPragmas() override;

  private:
    Parser *Self;
    ParsingClass *Class;
  };

  /// Contains the lexed tokens of an attribute with arguments that
  /// may reference member variables and so need to be parsed at the
  /// end of the class declaration after parsing all other member
  /// declarations.
  /// FIXME: Perhaps we should change the name of LateParsedDeclaration to
  /// LateParsedTokens.
  struct LateParsedAttribute : public LateParsedDeclaration {
    Parser *Self;
    CachedTokens Toks;
    IdentifierInfo &AttrName;
    IdentifierInfo *MacroII = nullptr;
    SourceLocation AttrNameLoc;
    SmallVector<Decl*, 2> Decls;

    explicit LateParsedAttribute(Parser *P, IdentifierInfo &Name,
                                 SourceLocation Loc)
      : Self(P), AttrName(Name), AttrNameLoc(Loc) {}

    void ParseLexedAttributes() override;

    void addDecl(Decl *D) { Decls.push_back(D); }
  };

  /// Contains the lexed tokens of a pragma with arguments that
  /// may reference member variables and so need to be parsed at the
  /// end of the class declaration after parsing all other member
  /// declarations.
  class LateParsedPragma : public LateParsedDeclaration {
    Parser *Self = nullptr;
    AccessSpecifier AS = AS_none;
    CachedTokens Toks;

  public:
    explicit LateParsedPragma(Parser *P, AccessSpecifier AS)
        : Self(P), AS(AS) {}

    void takeToks(CachedTokens &Cached) { Toks.swap(Cached); }
    const CachedTokens &toks() const { return Toks; }
    AccessSpecifier getAccessSpecifier() const { return AS; }

    void ParseLexedPragmas() override;
  };

  // A list of late-parsed attributes.  Used by ParseGNUAttributes.
  class LateParsedAttrList: public SmallVector<LateParsedAttribute *, 2> {
  public:
    LateParsedAttrList(bool PSoon = false) : ParseSoon(PSoon) { }

    bool parseSoon() { return ParseSoon; }

  private:
    bool ParseSoon;  // Are we planning to parse these shortly after creation?
  };

  /// Contains the lexed tokens of a member function definition
  /// which needs to be parsed at the end of the class declaration
  /// after parsing all other member declarations.
  struct LexedMethod : public LateParsedDeclaration {
    Parser *Self;
    Decl *D;
    CachedTokens Toks;

    explicit LexedMethod(Parser *P, Decl *MD) : Self(P), D(MD) {}

    void ParseLexedMethodDefs() override;
  };

  /// LateParsedDefaultArgument - Keeps track of a parameter that may
  /// have a default argument that cannot be parsed yet because it
  /// occurs within a member function declaration inside the class
  /// (C++ [class.mem]p2).
  struct LateParsedDefaultArgument {
    explicit LateParsedDefaultArgument(
        Decl *P, std::unique_ptr<CachedTokens> Toks = nullptr)
      : Param(P), Toks(std::move(Toks)) { }

    /// Param - The parameter declaration for this parameter.
    Decl *Param;

    /// Toks - The sequence of tokens that comprises the default
    /// argument expression, not including the '=' or the terminating
    /// ')' or ','. This will be NULL for parameters that have no
    /// default argument.
    std::unique_ptr<CachedTokens> Toks;
  };

  /// LateParsedMethodDeclaration - A method declaration inside a class that
  /// contains at least one entity whose parsing needs to be delayed
  /// until the class itself is completely-defined, such as a default
  /// argument (C++ [class.mem]p2).
  struct LateParsedMethodDeclaration : public LateParsedDeclaration {
    explicit LateParsedMethodDeclaration(Parser *P, Decl *M)
      : Self(P), Method(M), ExceptionSpecTokens(nullptr) {}

    void ParseLexedMethodDeclarations() override;

    Parser *Self;

    /// Method - The method declaration.
    Decl *Method;

    /// DefaultArgs - Contains the parameters of the function and
    /// their default arguments. At least one of the parameters will
    /// have a default argument, but all of the parameters of the
    /// method will be stored so that they can be reintroduced into
    /// scope at the appropriate times.
    SmallVector<LateParsedDefaultArgument, 8> DefaultArgs;

    /// The set of tokens that make up an exception-specification that
    /// has not yet been parsed.
    CachedTokens *ExceptionSpecTokens;
  };

  /// LateParsedMemberInitializer - An initializer for a non-static class data
  /// member whose parsing must be delayed until the class is completely
  /// defined (C++11 [class.mem]p2).
  struct LateParsedMemberInitializer : public LateParsedDeclaration {
    LateParsedMemberInitializer(Parser *P, Decl *FD)
      : Self(P), Field(FD) { }

    void ParseLexedMemberInitializers() override;

    Parser *Self;

    /// Field - The field declaration.
    Decl *Field;

    /// CachedTokens - The sequence of tokens that comprises the initializer,
    /// including any leading '='.
    CachedTokens Toks;
  };

  /// LateParsedDeclarationsContainer - During parsing of a top-level
  /// (non-nested) C++ class, method declarations that contain parts which
  /// won't be parsed until after the definition is completed
  /// (C++ [class.mem]p2) are stored here, together with any attached inline
  /// definitions and the tokens that will later be parsed to create those
  /// entities.
  typedef SmallVector<LateParsedDeclaration*,2>
    LateParsedDeclarationsContainer;

  /// Representation of a class that has been parsed, including
  /// any member function declarations or definitions that need to be
  /// parsed after the corresponding top-level class is complete.
  struct ParsingClass {
    ParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface)
      : TopLevelClass(TopLevelClass), IsInterface(IsInterface),
        TagOrTemplate(TagOrTemplate) {}

    /// Whether this is a "top-level" class, meaning that it is
    /// not nested within another class.
    bool TopLevelClass : 1;

    /// Whether this class is an __interface.
    bool IsInterface : 1;

    /// The class or class template whose definition we are parsing.
    Decl *TagOrTemplate;

    /// LateParsedDeclarations - Method declarations, inline definitions and
    /// nested classes that contain pieces whose parsing will be delayed until
    /// the top-level class is fully defined.
    LateParsedDeclarationsContainer LateParsedDeclarations;
  };

  /// The stack of classes that is currently being
  /// parsed. Nested and local classes will be pushed onto this stack
  /// when they are parsed, and removed afterward.
  std::stack<ParsingClass *> ClassStack;

  ParsingClass &getCurrentClass() {
    assert(!ClassStack.empty() && "No lexed method stacks!");
    return *ClassStack.top();
  }

  /// RAII object used to manage the parsing of a class definition.
  class ParsingClassDefinition {
    Parser &P;
    bool Popped;
    Sema::ParsingClassState State;

  public:
    ParsingClassDefinition(Parser &P, Decl *TagOrTemplate, bool TopLevelClass,
                           bool IsInterface)
      : P(P), Popped(false),
        State(P.PushParsingClass(TagOrTemplate, TopLevelClass, IsInterface)) {
    }

    /// Pop this class off the stack.
    void Pop() {
      assert(!Popped && "Nested class has already been popped");
      Popped = true;
      P.PopParsingClass(State);
    }

    ~ParsingClassDefinition() {
      if (!Popped)
        P.PopParsingClass(State);
    }
  };

  /// Contains any template-specific information that has been parsed
  /// prior to parsing declaration specifiers.
  struct ParsedTemplateInfo {
    ParsedTemplateInfo() : Kind(NonTemplate), TemplateParams(nullptr) {}

    ParsedTemplateInfo(TemplateParameterLists *TemplateParams,
                       bool isSpecialization,
                       bool lastParameterListWasEmpty = false)
      : Kind(isSpecialization? ExplicitSpecialization : Template),
        TemplateParams(TemplateParams),
        LastParameterListWasEmpty(lastParameterListWasEmpty) { }

    explicit ParsedTemplateInfo(SourceLocation ExternLoc,
                                SourceLocation TemplateLoc)
      : Kind(ExplicitInstantiation), TemplateParams(nullptr),
        ExternLoc(ExternLoc), TemplateLoc(TemplateLoc),
        LastParameterListWasEmpty(false){ }

    /// The kind of template we are parsing.
    enum {
      /// We are not parsing a template at all.
      NonTemplate = 0,
      /// We are parsing a template declaration.
      Template,
      /// We are parsing an explicit specialization.
      ExplicitSpecialization,
      /// We are parsing an explicit instantiation.
      ExplicitInstantiation
    } Kind;

    /// The template parameter lists, for template declarations
    /// and explicit specializations.
TemplateParameterLists *TemplateParams; /// The location of the 'extern' keyword, if any, for an explicit /// instantiation SourceLocation ExternLoc; /// The location of the 'template' keyword, for an explicit /// instantiation. SourceLocation TemplateLoc; /// Whether the last template parameter list was empty. bool LastParameterListWasEmpty; SourceRange getSourceRange() const LLVM_READONLY; }; // In ParseCXXInlineMethods.cpp. struct ReenterTemplateScopeRAII; struct ReenterClassScopeRAII; void LexTemplateFunctionForLateParsing(CachedTokens &Toks); void ParseLateTemplatedFuncDef(LateParsedTemplate &LPT); static void LateTemplateParserCallback(void *P, LateParsedTemplate &LPT); Sema::ParsingClassState PushParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface); void DeallocateParsedClasses(ParsingClass *Class); void PopParsingClass(Sema::ParsingClassState); enum CachedInitKind { CIK_DefaultArgument, CIK_DefaultInitializer }; NamedDecl *ParseCXXInlineMethodDef(AccessSpecifier AS, ParsedAttributes &AccessAttrs, ParsingDeclarator &D, const ParsedTemplateInfo &TemplateInfo, const VirtSpecifiers &VS, SourceLocation PureSpecLoc); void ParseCXXNonStaticMemberInitializer(Decl *VarD); void ParseLexedAttributes(ParsingClass &Class); void ParseLexedAttributeList(LateParsedAttrList &LAs, Decl *D, bool EnterScope, bool OnDefinition); void ParseLexedAttribute(LateParsedAttribute &LA, bool EnterScope, bool OnDefinition); void ParseLexedMethodDeclarations(ParsingClass &Class); void ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM); void ParseLexedMethodDefs(ParsingClass &Class); void ParseLexedMethodDef(LexedMethod &LM); void ParseLexedMemberInitializers(ParsingClass &Class); void ParseLexedMemberInitializer(LateParsedMemberInitializer &MI); void ParseLexedObjCMethodDefs(LexedMethod &LM, bool parseMethod); void ParseLexedPragmas(ParsingClass &Class); void ParseLexedPragma(LateParsedPragma &LP); bool ConsumeAndStoreFunctionPrologue(CachedTokens &Toks); bool ConsumeAndStoreInitializer(CachedTokens &Toks, CachedInitKind CIK); bool ConsumeAndStoreConditional(CachedTokens &Toks); bool ConsumeAndStoreUntil(tok::TokenKind T1, CachedTokens &Toks, bool StopAtSemi = true, bool ConsumeFinalToken = true) { return ConsumeAndStoreUntil(T1, T1, Toks, StopAtSemi, ConsumeFinalToken); } bool ConsumeAndStoreUntil(tok::TokenKind T1, tok::TokenKind T2, CachedTokens &Toks, bool StopAtSemi = true, bool ConsumeFinalToken = true); //===--------------------------------------------------------------------===// // C99 6.9: External Definitions. DeclGroupPtrTy ParseExternalDeclaration(ParsedAttributesWithRange &attrs, ParsingDeclSpec *DS = nullptr); bool isDeclarationAfterDeclarator(); bool isStartOfFunctionDefinition(const ParsingDeclarator &Declarator); DeclGroupPtrTy ParseDeclarationOrFunctionDefinition( ParsedAttributesWithRange &attrs, ParsingDeclSpec *DS = nullptr, AccessSpecifier AS = AS_none); DeclGroupPtrTy ParseDeclOrFunctionDefInternal(ParsedAttributesWithRange &attrs, ParsingDeclSpec &DS, AccessSpecifier AS); void SkipFunctionBody(); Decl *ParseFunctionDefinition(ParsingDeclarator &D, const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(), LateParsedAttrList *LateParsedAttrs = nullptr); void ParseKNRParamDeclarations(Declarator &D); // EndLoc is filled with the location of the last token of the simple-asm. 
ExprResult ParseSimpleAsm(bool ForAsmLabel, SourceLocation *EndLoc); ExprResult ParseAsmStringLiteral(bool ForAsmLabel); // Objective-C External Declarations void MaybeSkipAttributes(tok::ObjCKeywordKind Kind); DeclGroupPtrTy ParseObjCAtDirectives(ParsedAttributesWithRange &Attrs); DeclGroupPtrTy ParseObjCAtClassDeclaration(SourceLocation atLoc); Decl *ParseObjCAtInterfaceDeclaration(SourceLocation AtLoc, ParsedAttributes &prefixAttrs); class ObjCTypeParamListScope; ObjCTypeParamList *parseObjCTypeParamList(); ObjCTypeParamList *parseObjCTypeParamListOrProtocolRefs( ObjCTypeParamListScope &Scope, SourceLocation &lAngleLoc, SmallVectorImpl<IdentifierLocPair> &protocolIdents, SourceLocation &rAngleLoc, bool mayBeProtocolList = true); void HelperActionsForIvarDeclarations(Decl *interfaceDecl, SourceLocation atLoc, BalancedDelimiterTracker &T, SmallVectorImpl<Decl *> &AllIvarDecls, bool RBraceMissing); void ParseObjCClassInstanceVariables(Decl *interfaceDecl, tok::ObjCKeywordKind visibility, SourceLocation atLoc); bool ParseObjCProtocolReferences(SmallVectorImpl<Decl *> &P, SmallVectorImpl<SourceLocation> &PLocs, bool WarnOnDeclarations, bool ForObjCContainer, SourceLocation &LAngleLoc, SourceLocation &EndProtoLoc, bool consumeLastToken); /// Parse the first angle-bracket-delimited clause for an /// Objective-C object or object pointer type, which may be either /// type arguments or protocol qualifiers. void parseObjCTypeArgsOrProtocolQualifiers( ParsedType baseType, SourceLocation &typeArgsLAngleLoc, SmallVectorImpl<ParsedType> &typeArgs, SourceLocation &typeArgsRAngleLoc, SourceLocation &protocolLAngleLoc, SmallVectorImpl<Decl *> &protocols, SmallVectorImpl<SourceLocation> &protocolLocs, SourceLocation &protocolRAngleLoc, bool consumeLastToken, bool warnOnIncompleteProtocols); /// Parse either Objective-C type arguments or protocol qualifiers; if the /// former, also parse protocol qualifiers afterward. void parseObjCTypeArgsAndProtocolQualifiers( ParsedType baseType, SourceLocation &typeArgsLAngleLoc, SmallVectorImpl<ParsedType> &typeArgs, SourceLocation &typeArgsRAngleLoc, SourceLocation &protocolLAngleLoc, SmallVectorImpl<Decl *> &protocols, SmallVectorImpl<SourceLocation> &protocolLocs, SourceLocation &protocolRAngleLoc, bool consumeLastToken); /// Parse a protocol qualifier type such as '<NSCopying>', which is /// an anachronistic way of writing 'id<NSCopying>'. TypeResult parseObjCProtocolQualifierType(SourceLocation &rAngleLoc); /// Parse Objective-C type arguments and protocol qualifiers, extending the /// current type with the parsed result. 
TypeResult parseObjCTypeArgsAndProtocolQualifiers(SourceLocation loc, ParsedType type, bool consumeLastToken, SourceLocation &endLoc); void ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey, Decl *CDecl); DeclGroupPtrTy ParseObjCAtProtocolDeclaration(SourceLocation atLoc, ParsedAttributes &prefixAttrs); struct ObjCImplParsingDataRAII { Parser &P; Decl *Dcl; bool HasCFunction; typedef SmallVector<LexedMethod*, 8> LateParsedObjCMethodContainer; LateParsedObjCMethodContainer LateParsedObjCMethods; ObjCImplParsingDataRAII(Parser &parser, Decl *D) : P(parser), Dcl(D), HasCFunction(false) { P.CurParsedObjCImpl = this; Finished = false; } ~ObjCImplParsingDataRAII(); void finish(SourceRange AtEnd); bool isFinished() const { return Finished; } private: bool Finished; }; ObjCImplParsingDataRAII *CurParsedObjCImpl; void StashAwayMethodOrFunctionBodyTokens(Decl *MDecl); DeclGroupPtrTy ParseObjCAtImplementationDeclaration(SourceLocation AtLoc, ParsedAttributes &Attrs); DeclGroupPtrTy ParseObjCAtEndDeclaration(SourceRange atEnd); Decl *ParseObjCAtAliasDeclaration(SourceLocation atLoc); Decl *ParseObjCPropertySynthesize(SourceLocation atLoc); Decl *ParseObjCPropertyDynamic(SourceLocation atLoc); IdentifierInfo *ParseObjCSelectorPiece(SourceLocation &MethodLocation); // Definitions for Objective-c context sensitive keywords recognition. enum ObjCTypeQual { objc_in=0, objc_out, objc_inout, objc_oneway, objc_bycopy, objc_byref, objc_nonnull, objc_nullable, objc_null_unspecified, objc_NumQuals }; IdentifierInfo *ObjCTypeQuals[objc_NumQuals]; bool isTokIdentifier_in() const; ParsedType ParseObjCTypeName(ObjCDeclSpec &DS, DeclaratorContext Ctx, ParsedAttributes *ParamAttrs); Decl *ParseObjCMethodPrototype( tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword, bool MethodDefinition = true); Decl *ParseObjCMethodDecl(SourceLocation mLoc, tok::TokenKind mType, tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword, bool MethodDefinition=true); void ParseObjCPropertyAttribute(ObjCDeclSpec &DS); Decl *ParseObjCMethodDefinition(); public: //===--------------------------------------------------------------------===// // C99 6.5: Expressions. /// TypeCastState - State whether an expression is or may be a type cast. enum TypeCastState { NotTypeCast = 0, MaybeTypeCast, IsTypeCast }; ExprResult ParseExpression(TypeCastState isTypeCast = NotTypeCast); ExprResult ParseConstantExpressionInExprEvalContext( TypeCastState isTypeCast = NotTypeCast); ExprResult ParseConstantExpression(TypeCastState isTypeCast = NotTypeCast); ExprResult ParseCaseExpression(SourceLocation CaseLoc); ExprResult ParseConstraintExpression(); ExprResult ParseConstraintLogicalAndExpression(bool IsTrailingRequiresClause); ExprResult ParseConstraintLogicalOrExpression(bool IsTrailingRequiresClause); // Expr that doesn't include commas. ExprResult ParseAssignmentExpression(TypeCastState isTypeCast = NotTypeCast); ExprResult ParseMSAsmIdentifier(llvm::SmallVectorImpl<Token> &LineToks, unsigned &NumLineToksConsumed, bool IsUnevaluated); ExprResult ParseStringLiteralExpression(bool AllowUserDefinedLiteral = false); private: ExprResult ParseExpressionWithLeadingAt(SourceLocation AtLoc); ExprResult ParseExpressionWithLeadingExtension(SourceLocation ExtLoc); ExprResult ParseRHSOfBinaryExpression(ExprResult LHS, prec::Level MinPrec); /// Control what ParseCastExpression will parse. 
  /// Control what ParseCastExpression will parse.
  enum CastParseKind {
    AnyCastExpr = 0,
    UnaryExprOnly,
    PrimaryExprOnly
  };
  ExprResult ParseCastExpression(CastParseKind ParseKind,
                                 bool isAddressOfOperand, bool &NotCastExpr,
                                 TypeCastState isTypeCast,
                                 bool isVectorLiteral = false,
                                 bool *NotPrimaryExpression = nullptr);
  ExprResult ParseCastExpression(CastParseKind ParseKind,
                                 bool isAddressOfOperand = false,
                                 TypeCastState isTypeCast = NotTypeCast,
                                 bool isVectorLiteral = false,
                                 bool *NotPrimaryExpression = nullptr);

  /// Returns true if the next token cannot start an expression.
  bool isNotExpressionStart();

  /// Returns true if the next token would start a postfix-expression
  /// suffix.
  bool isPostfixExpressionSuffixStart() {
    tok::TokenKind K = Tok.getKind();
    return (K == tok::l_square || K == tok::l_paren ||
            K == tok::period || K == tok::arrow ||
            K == tok::plusplus || K == tok::minusminus);
  }

  bool diagnoseUnknownTemplateId(ExprResult TemplateName, SourceLocation Less);
  void checkPotentialAngleBracket(ExprResult &PotentialTemplateName);
  bool checkPotentialAngleBracketDelimiter(const AngleBracketTracker::Loc &,
                                           const Token &OpToken);
  bool checkPotentialAngleBracketDelimiter(const Token &OpToken) {
    if (auto *Info = AngleBrackets.getCurrent(*this))
      return checkPotentialAngleBracketDelimiter(*Info, OpToken);
    return false;
  }

  ExprResult ParsePostfixExpressionSuffix(ExprResult LHS);
  ExprResult ParseUnaryExprOrTypeTraitExpression();
  ExprResult ParseBuiltinPrimaryExpression();
  ExprResult ParseSYCLUniqueStableNameExpression();

  ExprResult ParseExprAfterUnaryExprOrTypeTrait(const Token &OpTok,
                                                bool &isCastExpr,
                                                ParsedType &CastTy,
                                                SourceRange &CastRange);

  typedef SmallVector<SourceLocation, 20> CommaLocsTy;

  /// ParseExpressionList - Used for C/C++ (argument-)expression-list.
  bool ParseExpressionList(SmallVectorImpl<Expr *> &Exprs,
                           SmallVectorImpl<SourceLocation> &CommaLocs,
                           llvm::function_ref<void()> ExpressionStarts =
                               llvm::function_ref<void()>(),
                           bool FailImmediatelyOnInvalidExpr = false,
                           bool EarlyTypoCorrection = false);

  /// ParseSimpleExpressionList - A simple comma-separated list of expressions,
  /// used for misc language extensions.
  bool ParseSimpleExpressionList(SmallVectorImpl<Expr *> &Exprs,
                                 SmallVectorImpl<SourceLocation> &CommaLocs);
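  // Illustrative use (exposition only): for the call 'f(a, b, c)',
  // ParseExpressionList is expected to fill Exprs with {a, b, c} and
  // CommaLocs with the locations of the two commas.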
  /// ParenParseOption - Control what ParseParenExpression will parse.
  enum ParenParseOption {
    SimpleExpr,      // Only parse '(' expression ')'
    FoldExpr,        // Also allow fold-expression <anything>
    CompoundStmt,    // Also allow '(' compound-statement ')'
    CompoundLiteral, // Also allow '(' type-name ')' '{' ... '}'
    CastExpr         // Also allow '(' type-name ')' <anything>
  };
  ExprResult ParseParenExpression(ParenParseOption &ExprType,
                                  bool stopIfCastExpr, bool isTypeCast,
                                  ParsedType &CastTy,
                                  SourceLocation &RParenLoc);

  ExprResult ParseCXXAmbiguousParenExpression(
      ParenParseOption &ExprType, ParsedType &CastTy,
      BalancedDelimiterTracker &Tracker, ColonProtectionRAIIObject &ColonProt);
  ExprResult ParseCompoundLiteralExpression(ParsedType Ty,
                                            SourceLocation LParenLoc,
                                            SourceLocation RParenLoc);

  ExprResult ParseGenericSelectionExpression();

  ExprResult ParseObjCBoolLiteral();

  ExprResult ParseFoldExpression(ExprResult LHS, BalancedDelimiterTracker &T);

  //===--------------------------------------------------------------------===//
  // C++ Expressions
  ExprResult tryParseCXXIdExpression(CXXScopeSpec &SS, bool isAddressOfOperand,
                                     Token &Replacement);
  ExprResult ParseCXXIdExpression(bool isAddressOfOperand = false);

  bool areTokensAdjacent(const Token &A, const Token &B);

  void CheckForTemplateAndDigraph(Token &Next, ParsedType ObjectTypePtr,
                                  bool EnteringContext, IdentifierInfo &II,
                                  CXXScopeSpec &SS);

  bool ParseOptionalCXXScopeSpecifier(
      CXXScopeSpec &SS, ParsedType ObjectType, bool ObjectHasErrors,
      bool EnteringContext, bool *MayBePseudoDestructor = nullptr,
      bool IsTypename = false, IdentifierInfo **LastII = nullptr,
      bool OnlyNamespace = false, bool InUsingDeclaration = false);

  //===--------------------------------------------------------------------===//
  // C++11 5.1.2: Lambda expressions

  /// Result of tentatively parsing a lambda-introducer.
  enum class LambdaIntroducerTentativeParse {
    /// This appears to be a lambda-introducer, which has been fully parsed.
    Success,
    /// This is a lambda-introducer, but has not been fully parsed, and this
    /// function needs to be called again to parse it.
    Incomplete,
    /// This is definitely an Objective-C message send expression, rather than
    /// a lambda-introducer, attribute-specifier, or array designator.
    MessageSend,
    /// This is not a lambda-introducer.
    Invalid,
  };

  // [...] () -> type {...}
  ExprResult ParseLambdaExpression();
  ExprResult TryParseLambdaExpression();
  bool
  ParseLambdaIntroducer(LambdaIntroducer &Intro,
                        LambdaIntroducerTentativeParse *Tentative = nullptr);
  ExprResult ParseLambdaExpressionAfterIntroducer(LambdaIntroducer &Intro);
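  // Illustrative ambiguity (exposition only): in Objective-C++ a '[' can open
  // either form below, and the tentative lambda-introducer parse reports
  // MessageSend for the second so the caller can reparse it as a message:
  //   auto l = [self] { return 0; };  // lambda capturing 'self'
  //   [self doSomething];             // Objective-C message send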
  //===--------------------------------------------------------------------===//
  // C++ 5.2p1: C++ Casts
  ExprResult ParseCXXCasts();

  /// Parse a __builtin_bit_cast(T, E), used to implement C++2a std::bit_cast.
  ExprResult ParseBuiltinBitCast();

  //===--------------------------------------------------------------------===//
  // C++ 5.2p1: C++ Type Identification
  ExprResult ParseCXXTypeid();

  //===--------------------------------------------------------------------===//
  // C++ : Microsoft __uuidof Expression
  ExprResult ParseCXXUuidof();

  //===--------------------------------------------------------------------===//
  // C++ 5.2.4: C++ Pseudo-Destructor Expressions
  ExprResult ParseCXXPseudoDestructor(Expr *Base, SourceLocation OpLoc,
                                      tok::TokenKind OpKind, CXXScopeSpec &SS,
                                      ParsedType ObjectType);

  //===--------------------------------------------------------------------===//
  // C++ 9.3.2: C++ 'this' pointer
  ExprResult ParseCXXThis();

  //===--------------------------------------------------------------------===//
  // C++ 15: C++ Throw Expression
  ExprResult ParseThrowExpression();

  ExceptionSpecificationType tryParseExceptionSpecification(
      bool Delayed, SourceRange &SpecificationRange,
      SmallVectorImpl<ParsedType> &DynamicExceptions,
      SmallVectorImpl<SourceRange> &DynamicExceptionRanges,
      ExprResult &NoexceptExpr, CachedTokens *&ExceptionSpecTokens);

  // EndLoc is filled with the location of the last token of the specification.
  ExceptionSpecificationType ParseDynamicExceptionSpecification(
      SourceRange &SpecificationRange, SmallVectorImpl<ParsedType> &Exceptions,
      SmallVectorImpl<SourceRange> &Ranges);

  //===--------------------------------------------------------------------===//
  // C++0x 8: Function declaration trailing-return-type
  TypeResult ParseTrailingReturnType(SourceRange &Range,
                                     bool MayBeFollowedByDirectInit);

  //===--------------------------------------------------------------------===//
  // C++ 2.13.5: C++ Boolean Literals
  ExprResult ParseCXXBoolLiteral();

  //===--------------------------------------------------------------------===//
  // C++ 5.2.3: Explicit type conversion (functional notation)
  ExprResult ParseCXXTypeConstructExpression(const DeclSpec &DS);

  /// ParseCXXSimpleTypeSpecifier - [C++ 7.1.5.2] Simple type specifiers.
  /// This should only be called when the current token is known to be part of
  /// simple-type-specifier.
  void ParseCXXSimpleTypeSpecifier(DeclSpec &DS);

  bool ParseCXXTypeSpecifierSeq(DeclSpec &DS);

  //===--------------------------------------------------------------------===//
  // C++ 5.3.4 and 5.3.5: C++ new and delete
  bool ParseExpressionListOrTypeId(SmallVectorImpl<Expr *> &Exprs,
                                   Declarator &D);
  void ParseDirectNewDeclarator(Declarator &D);
  ExprResult ParseCXXNewExpression(bool UseGlobal, SourceLocation Start);
  ExprResult ParseCXXDeleteExpression(bool UseGlobal, SourceLocation Start);

  //===--------------------------------------------------------------------===//
  // C++ if/switch/while/for condition expression.
  struct ForRangeInfo;
  Sema::ConditionResult ParseCXXCondition(StmtResult *InitStmt,
                                          SourceLocation Loc,
                                          Sema::ConditionKind CK,
                                          bool MissingOK,
                                          ForRangeInfo *FRI = nullptr,
                                          bool EnterForConditionScope = false);
  DeclGroupPtrTy ParseAliasDeclarationInInitStatement(
      DeclaratorContext Context, ParsedAttributesWithRange &Attrs);

  //===--------------------------------------------------------------------===//
  // C++ Coroutines

  ExprResult ParseCoyieldExpression();

  //===--------------------------------------------------------------------===//
  // C++ Concepts

  ExprResult ParseRequiresExpression();
  void ParseTrailingRequiresClause(Declarator &D);
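  // Illustrative condition forms (exposition only) that ParseCXXCondition
  // above is expected to distinguish:
  //   if (x < 0)                          // plain expression
  //   if (int n = compute())              // condition declaration
  //   if (auto it = find(); it != last)   // C++17 init-statement + condition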
  //===--------------------------------------------------------------------===//
  // C99 6.7.8: Initialization.

  /// ParseInitializer
  ///       initializer: [C99 6.7.8]
  ///         assignment-expression
  ///         '{' ...
  ExprResult ParseInitializer() {
    if (Tok.isNot(tok::l_brace))
      return ParseAssignmentExpression();
    return ParseBraceInitializer();
  }
  bool MayBeDesignationStart();
  ExprResult ParseBraceInitializer();
  struct DesignatorCompletionInfo {
    SmallVectorImpl<Expr *> &InitExprs;
    QualType PreferredBaseType;
  };
  ExprResult ParseInitializerWithPotentialDesignator(DesignatorCompletionInfo);
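  // Illustrative designations (exposition only) relevant to
  // MayBeDesignationStart and ParseInitializerWithPotentialDesignator:
  //   struct Point p = { .x = 1, .y = 2 };  // C99 field designators
  //   int a[4] = { [2] = 7 };               // array designator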
  //===--------------------------------------------------------------------===//
  // clang Expressions

  ExprResult ParseBlockLiteralExpression(); // ^{...}

  //===--------------------------------------------------------------------===//
  // Objective-C Expressions
  ExprResult ParseObjCAtExpression(SourceLocation AtLocation);
  ExprResult ParseObjCStringLiteral(SourceLocation AtLoc);
  ExprResult ParseObjCCharacterLiteral(SourceLocation AtLoc);
  ExprResult ParseObjCNumericLiteral(SourceLocation AtLoc);
  ExprResult ParseObjCBooleanLiteral(SourceLocation AtLoc, bool ArgValue);
  ExprResult ParseObjCArrayLiteral(SourceLocation AtLoc);
  ExprResult ParseObjCDictionaryLiteral(SourceLocation AtLoc);
  ExprResult ParseObjCBoxedExpr(SourceLocation AtLoc);
  ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc);
  ExprResult ParseObjCSelectorExpression(SourceLocation AtLoc);
  ExprResult ParseObjCProtocolExpression(SourceLocation AtLoc);
  bool isSimpleObjCMessageExpression();
  ExprResult ParseObjCMessageExpression();
  ExprResult ParseObjCMessageExpressionBody(SourceLocation LBracloc,
                                            SourceLocation SuperLoc,
                                            ParsedType ReceiverType,
                                            Expr *ReceiverExpr);
  ExprResult ParseAssignmentExprWithObjCMessageExprStart(
      SourceLocation LBracloc, SourceLocation SuperLoc,
      ParsedType ReceiverType, Expr *ReceiverExpr);
  bool ParseObjCXXMessageReceiver(bool &IsExpr, void *&TypeOrExpr);

  //===--------------------------------------------------------------------===//
  // C99 6.8: Statements and Blocks.

  /// A SmallVector of statements, with stack size 32 (as that is the only one
  /// used.)
  typedef SmallVector<Stmt *, 32> StmtVector;
  /// A SmallVector of expressions, with stack size 12 (the maximum used.)
  typedef SmallVector<Expr *, 12> ExprVector;
  /// A SmallVector of types.
  typedef SmallVector<ParsedType, 12> TypeVector;

  StmtResult
  ParseStatement(SourceLocation *TrailingElseLoc = nullptr,
                 ParsedStmtContext StmtCtx = ParsedStmtContext::SubStmt);
  StmtResult ParseStatementOrDeclaration(
      StmtVector &Stmts, ParsedStmtContext StmtCtx,
      SourceLocation *TrailingElseLoc = nullptr);
  StmtResult ParseStatementOrDeclarationAfterAttributes(
      StmtVector &Stmts, ParsedStmtContext StmtCtx,
      SourceLocation *TrailingElseLoc, ParsedAttributesWithRange &Attrs);
  StmtResult ParseExprStatement(ParsedStmtContext StmtCtx);
  StmtResult ParseLabeledStatement(ParsedAttributesWithRange &attrs,
                                   ParsedStmtContext StmtCtx);
  StmtResult ParseCaseStatement(ParsedStmtContext StmtCtx,
                                bool MissingCase = false,
                                ExprResult Expr = ExprResult());
  StmtResult ParseDefaultStatement(ParsedStmtContext StmtCtx);
  StmtResult ParseCompoundStatement(bool isStmtExpr = false);
  StmtResult ParseCompoundStatement(bool isStmtExpr, unsigned ScopeFlags);
  void ParseCompoundStatementLeadingPragmas();
  bool ConsumeNullStmt(StmtVector &Stmts);
  StmtResult ParseCompoundStatementBody(bool isStmtExpr = false);
  bool ParseParenExprOrCondition(StmtResult *InitStmt,
                                 Sema::ConditionResult &CondResult,
                                 SourceLocation Loc, Sema::ConditionKind CK,
                                 bool MissingOK, SourceLocation *LParenLoc,
                                 SourceLocation *RParenLoc);
  StmtResult ParseIfStatement(SourceLocation *TrailingElseLoc);
  StmtResult ParseSwitchStatement(SourceLocation *TrailingElseLoc);
  StmtResult ParseWhileStatement(SourceLocation *TrailingElseLoc);
  StmtResult ParseDoStatement();
  StmtResult ParseForStatement(SourceLocation *TrailingElseLoc);
  StmtResult ParseGotoStatement();
  StmtResult ParseContinueStatement();
  StmtResult ParseBreakStatement();
  StmtResult ParseReturnStatement();
  StmtResult ParseAsmStatement(bool &msAsm);
  StmtResult ParseMicrosoftAsmStatement(SourceLocation AsmLoc);
  StmtResult ParsePragmaLoopHint(StmtVector &Stmts, ParsedStmtContext StmtCtx,
                                 SourceLocation *TrailingElseLoc,
                                 ParsedAttributesWithRange &Attrs);
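  // Illustrative input for ParsePragmaLoopHint (exposition only):
  //   #pragma clang loop vectorize(enable) unroll_count(4)
  //   for (int i = 0; i != n; ++i) body(i);
  // The parsed hints are attached to the loop statement that follows.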
  /// Describes the behavior that should be taken for an __if_exists
  /// block.
  enum IfExistsBehavior {
    /// Parse the block; this code is always used.
    IEB_Parse,
    /// Skip the block entirely; this code is never used.
    IEB_Skip,
    /// Parse the block as a dependent block, which may be used in
    /// some template instantiations but not others.
    IEB_Dependent
  };

  /// Describes the condition of a Microsoft __if_exists or
  /// __if_not_exists block.
  struct IfExistsCondition {
    /// The location of the initial keyword.
    SourceLocation KeywordLoc;
    /// Whether this is an __if_exists block (rather than an
    /// __if_not_exists block).
    bool IsIfExists;

    /// Nested-name-specifier preceding the name.
    CXXScopeSpec SS;

    /// The name we're looking for.
    UnqualifiedId Name;

    /// The behavior this __if_exists or __if_not_exists block
    /// should have.
    IfExistsBehavior Behavior;
  };

  bool ParseMicrosoftIfExistsCondition(IfExistsCondition &Result);
  void ParseMicrosoftIfExistsStatement(StmtVector &Stmts);
  void ParseMicrosoftIfExistsExternalDeclaration();
  void ParseMicrosoftIfExistsClassDeclaration(DeclSpec::TST TagType,
                                              ParsedAttributes &AccessAttrs,
                                              AccessSpecifier &CurAS);
  bool ParseMicrosoftIfExistsBraceInitializer(ExprVector &InitExprs,
                                              bool &InitExprsOk);
  bool ParseAsmOperandsOpt(SmallVectorImpl<IdentifierInfo *> &Names,
                           SmallVectorImpl<Expr *> &Constraints,
                           SmallVectorImpl<Expr *> &Exprs);

  //===--------------------------------------------------------------------===//
  // C++ 6: Statements and Blocks

  StmtResult ParseCXXTryBlock();
  StmtResult ParseCXXTryBlockCommon(SourceLocation TryLoc, bool FnTry = false);
  StmtResult ParseCXXCatchBlock(bool FnCatch = false);

  //===--------------------------------------------------------------------===//
  // MS: SEH Statements and Blocks

  StmtResult ParseSEHTryBlock();
  StmtResult ParseSEHExceptBlock(SourceLocation Loc);
  StmtResult ParseSEHFinallyBlock(SourceLocation Loc);
  StmtResult ParseSEHLeaveStatement();

  //===--------------------------------------------------------------------===//
  // Objective-C Statements

  StmtResult ParseObjCAtStatement(SourceLocation atLoc,
                                  ParsedStmtContext StmtCtx);
  StmtResult ParseObjCTryStmt(SourceLocation atLoc);
  StmtResult ParseObjCThrowStmt(SourceLocation atLoc);
  StmtResult ParseObjCSynchronizedStmt(SourceLocation atLoc);
  StmtResult ParseObjCAutoreleasePoolStmt(SourceLocation atLoc);

  //===--------------------------------------------------------------------===//
  // C99 6.7: Declarations.

  /// A context for parsing declaration specifiers.  TODO: flesh this
  /// out, there are other significant restrictions on specifiers that
  /// would be best implemented in the parser.
  enum class DeclSpecContext {
    DSC_normal,         // normal context
    DSC_class,          // class context, enables 'friend'
    DSC_type_specifier, // C++ type-specifier-seq or C specifier-qualifier-list
    DSC_trailing, // C++11 trailing-type-specifier in a trailing return type
    DSC_alias_declaration, // C++11 type-specifier-seq in an alias-declaration
    DSC_top_level,         // top-level/namespace declaration context
    DSC_template_param,    // template parameter context
    DSC_template_type_arg, // template type argument context
    DSC_objc_method_result, // ObjC method result context, enables
                            // 'instancetype'
    DSC_condition           // condition declaration context
  };

  /// Is this a context in which we are parsing just a type-specifier (or
  /// trailing-type-specifier)?
  static bool isTypeSpecifier(DeclSpecContext DSC) {
    switch (DSC) {
    case DeclSpecContext::DSC_normal:
    case DeclSpecContext::DSC_template_param:
    case DeclSpecContext::DSC_class:
    case DeclSpecContext::DSC_top_level:
    case DeclSpecContext::DSC_objc_method_result:
    case DeclSpecContext::DSC_condition:
      return false;

    case DeclSpecContext::DSC_template_type_arg:
    case DeclSpecContext::DSC_type_specifier:
    case DeclSpecContext::DSC_trailing:
    case DeclSpecContext::DSC_alias_declaration:
      return true;
    }
    llvm_unreachable("Missing DeclSpecContext case");
  }
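  // Illustrative note (exposition only): in 'auto f() -> const X;' the tokens
  // after '->' are parsed with DeclSpecContext::DSC_trailing, for which
  // isTypeSpecifier() returns true and class/enum definitions are not
  // permitted.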
  /// Whether a defining-type-specifier is permitted in a given context.
  enum class AllowDefiningTypeSpec {
    /// The grammar doesn't allow a defining-type-specifier here, and we must
    /// not parse one (eg, because a '{' could mean something else).
    No,
    /// The grammar doesn't allow a defining-type-specifier here, but we permit
    /// one for error recovery purposes. Sema will reject.
    NoButErrorRecovery,
    /// The grammar allows a defining-type-specifier here, even though it's
    /// always invalid. Sema will reject.
    YesButInvalid,
    /// The grammar allows a defining-type-specifier here, and one can be
    /// valid.
    Yes
  };

  /// Is this a context in which we are parsing defining-type-specifiers (and
  /// so permit class and enum definitions in addition to non-defining class
  /// and enum elaborated-type-specifiers)?
  static AllowDefiningTypeSpec
  isDefiningTypeSpecifierContext(DeclSpecContext DSC) {
    switch (DSC) {
    case DeclSpecContext::DSC_normal:
    case DeclSpecContext::DSC_class:
    case DeclSpecContext::DSC_top_level:
    case DeclSpecContext::DSC_alias_declaration:
    case DeclSpecContext::DSC_objc_method_result:
      return AllowDefiningTypeSpec::Yes;

    case DeclSpecContext::DSC_condition:
    case DeclSpecContext::DSC_template_param:
      return AllowDefiningTypeSpec::YesButInvalid;

    case DeclSpecContext::DSC_template_type_arg:
    case DeclSpecContext::DSC_type_specifier:
      return AllowDefiningTypeSpec::NoButErrorRecovery;

    case DeclSpecContext::DSC_trailing:
      return AllowDefiningTypeSpec::No;
    }
    llvm_unreachable("Missing DeclSpecContext case");
  }

  /// Is this a context in which an opaque-enum-declaration can appear?
  static bool isOpaqueEnumDeclarationContext(DeclSpecContext DSC) {
    switch (DSC) {
    case DeclSpecContext::DSC_normal:
    case DeclSpecContext::DSC_class:
    case DeclSpecContext::DSC_top_level:
      return true;

    case DeclSpecContext::DSC_alias_declaration:
    case DeclSpecContext::DSC_objc_method_result:
    case DeclSpecContext::DSC_condition:
    case DeclSpecContext::DSC_template_param:
    case DeclSpecContext::DSC_template_type_arg:
    case DeclSpecContext::DSC_type_specifier:
    case DeclSpecContext::DSC_trailing:
      return false;
    }
    llvm_unreachable("Missing DeclSpecContext case");
  }

  /// Is this a context in which we can perform class template argument
  /// deduction?
  static bool isClassTemplateDeductionContext(DeclSpecContext DSC) {
    switch (DSC) {
    case DeclSpecContext::DSC_normal:
    case DeclSpecContext::DSC_template_param:
    case DeclSpecContext::DSC_class:
    case DeclSpecContext::DSC_top_level:
    case DeclSpecContext::DSC_condition:
    case DeclSpecContext::DSC_type_specifier:
      return true;

    case DeclSpecContext::DSC_objc_method_result:
    case DeclSpecContext::DSC_template_type_arg:
    case DeclSpecContext::DSC_trailing:
    case DeclSpecContext::DSC_alias_declaration:
      return false;
    }
    llvm_unreachable("Missing DeclSpecContext case");
  }
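  // Illustrative note (exposition only): isClassTemplateDeductionContext
  // gates class template argument deduction, e.g.
  //   std::pair p(1, 2.0);  // OK in DSC_normal: deduces std::pair<int, double>
  // while a trailing return type (DSC_trailing) must name a concrete type.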
  /// Information on a C++0x for-range-initializer found while parsing a
  /// declaration which turns out to be a for-range-declaration.
  struct ForRangeInit {
    SourceLocation ColonLoc;
    ExprResult RangeExpr;

    bool ParsedForRangeDecl() { return !ColonLoc.isInvalid(); }
  };
  struct ForRangeInfo : ForRangeInit {
    StmtResult LoopVar;
  };

  DeclGroupPtrTy ParseDeclaration(DeclaratorContext Context,
                                  SourceLocation &DeclEnd,
                                  ParsedAttributesWithRange &attrs,
                                  SourceLocation *DeclSpecStart = nullptr);
  DeclGroupPtrTy
  ParseSimpleDeclaration(DeclaratorContext Context, SourceLocation &DeclEnd,
                         ParsedAttributesWithRange &attrs, bool RequireSemi,
                         ForRangeInit *FRI = nullptr,
                         SourceLocation *DeclSpecStart = nullptr);
  bool MightBeDeclarator(DeclaratorContext Context);
  DeclGroupPtrTy ParseDeclGroup(ParsingDeclSpec &DS, DeclaratorContext Context,
                                SourceLocation *DeclEnd = nullptr,
                                ForRangeInit *FRI = nullptr);
  Decl *ParseDeclarationAfterDeclarator(
      Declarator &D,
      const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo());
  bool ParseAsmAttributesAfterDeclarator(Declarator &D);
  Decl *ParseDeclarationAfterDeclaratorAndAttributes(
      Declarator &D,
      const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
      ForRangeInit *FRI = nullptr);
  Decl *ParseFunctionStatementBody(Decl *Decl, ParseScope &BodyScope);
  Decl *ParseFunctionTryBlock(Decl *Decl, ParseScope &BodyScope);

  /// When in code-completion, skip parsing of the function/method body
  /// unless the body contains the code-completion point.
  ///
  /// \returns true if the function body was skipped.
  bool trySkippingFunctionBody();

  bool ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS,
                        const ParsedTemplateInfo &TemplateInfo,
                        AccessSpecifier AS, DeclSpecContext DSC,
                        ParsedAttributesWithRange &Attrs);
  DeclSpecContext
  getDeclSpecContextFromDeclaratorContext(DeclaratorContext Context);
  void ParseDeclarationSpecifiers(
      DeclSpec &DS,
      const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
      AccessSpecifier AS = AS_none,
      DeclSpecContext DSC = DeclSpecContext::DSC_normal,
      LateParsedAttrList *LateAttrs = nullptr);
  bool DiagnoseMissingSemiAfterTagDefinition(
      DeclSpec &DS, AccessSpecifier AS, DeclSpecContext DSContext,
      LateParsedAttrList *LateAttrs = nullptr);

  void ParseSpecifierQualifierList(
      DeclSpec &DS, AccessSpecifier AS = AS_none,
      DeclSpecContext DSC = DeclSpecContext::DSC_normal);

  void ParseObjCTypeQualifierList(ObjCDeclSpec &DS, DeclaratorContext Context);

  void ParseEnumSpecifier(SourceLocation TagLoc, DeclSpec &DS,
                          const ParsedTemplateInfo &TemplateInfo,
                          AccessSpecifier AS, DeclSpecContext DSC);
  void ParseEnumBody(SourceLocation StartLoc, Decl *TagDecl);
  void ParseStructUnionBody(SourceLocation StartLoc, DeclSpec::TST TagType,
                            RecordDecl *TagDecl);

  void ParseStructDeclaration(
      ParsingDeclSpec &DS,
      llvm::function_ref<void(ParsingFieldDeclarator &)> FieldsCallback);

  bool isDeclarationSpecifier(bool DisambiguatingWithExpression = false);
  bool isTypeSpecifierQualifier();

  /// isKnownToBeTypeSpecifier - Return true if we know that the specified
  /// token is definitely a type-specifier. Return false if it isn't part of a
  /// type specifier or if we're not sure.
  bool isKnownToBeTypeSpecifier(const Token &Tok) const;

  /// Return true if we know that we are definitely looking at a
  /// decl-specifier, and isn't part of an expression such as a function-style
  /// cast. Return false if it's not a decl-specifier, or we're not sure.
  bool isKnownToBeDeclarationSpecifier() {
    if (getLangOpts().CPlusPlus)
      return isCXXDeclarationSpecifier() == TPResult::True;
    return isDeclarationSpecifier(true);
  }
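  // Illustrative ambiguity (exposition only) behind
  // isKnownToBeDeclarationSpecifier: given a class type T, the statement
  //   T(a);
  // either declares a variable 'a' of type T or is a function-style cast of
  // 'a' to T; the tentative-parsing machinery below resolves such cases.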
  /// isDeclarationStatement - Disambiguates between a declaration or an
  /// expression statement, when parsing function bodies.
  /// Returns true for declaration, false for expression.
  bool isDeclarationStatement() {
    if (getLangOpts().CPlusPlus)
      return isCXXDeclarationStatement();
    return isDeclarationSpecifier(true);
  }

  /// isForInitDeclaration - Disambiguates between a declaration or an
  /// expression in the context of the C 'clause-1' or the C++
  /// 'for-init-statement' part of a 'for' statement.
  /// Returns true for declaration, false for expression.
  bool isForInitDeclaration() {
    if (getLangOpts().OpenMP)
      Actions.startOpenMPLoop();
    if (getLangOpts().CPlusPlus)
      return Tok.is(tok::kw_using) ||
             isCXXSimpleDeclaration(/*AllowForRangeDecl=*/true);
    return isDeclarationSpecifier(true);
  }

  /// Determine whether this is a C++1z for-range-identifier.
  bool isForRangeIdentifier();

  /// Determine whether we are currently at the start of an Objective-C
  /// class message that appears to be missing the open bracket '['.
  bool isStartOfObjCClassMessageMissingOpenBracket();

  /// Starting with a scope specifier, identifier, or
  /// template-id that refers to the current class, determine whether
  /// this is a constructor declarator.
  bool isConstructorDeclarator(bool Unqualified, bool DeductionGuide = false);

  /// Specifies the context in which type-id/expression
  /// disambiguation will occur.
  enum TentativeCXXTypeIdContext {
    TypeIdInParens,
    TypeIdUnambiguous,
    TypeIdAsTemplateArgument
  };

  /// isTypeIdInParens - Assumes that a '(' was parsed and now we want to know
  /// whether the parens contain an expression or a type-id.
  /// Returns true for a type-id and false for an expression.
  bool isTypeIdInParens(bool &isAmbiguous) {
    if (getLangOpts().CPlusPlus)
      return isCXXTypeId(TypeIdInParens, isAmbiguous);
    isAmbiguous = false;
    return isTypeSpecifierQualifier();
  }
  bool isTypeIdInParens() {
    bool isAmbiguous;
    return isTypeIdInParens(isAmbiguous);
  }

  /// Checks if the current tokens form a type-id or an expression.
  /// It is similar to isTypeIdInParens but does not suppose that the type-id
  /// is in parentheses.
  bool isTypeIdUnambiguously() {
    bool IsAmbiguous;
    if (getLangOpts().CPlusPlus)
      return isCXXTypeId(TypeIdUnambiguous, IsAmbiguous);
    return isTypeSpecifierQualifier();
  }

  /// isCXXDeclarationStatement - C++-specialized function that disambiguates
  /// between a declaration or an expression statement, when parsing function
  /// bodies. Returns true for declaration, false for expression.
  bool isCXXDeclarationStatement();

  /// isCXXSimpleDeclaration - C++-specialized function that disambiguates
  /// between a simple-declaration or an expression-statement.
  /// If during the disambiguation process a parsing error is encountered,
  /// the function returns true to let the declaration parsing code handle it.
  /// Returns false if the statement is disambiguated as expression.
  bool isCXXSimpleDeclaration(bool AllowForRangeDecl);

  /// isCXXFunctionDeclarator - Disambiguates between a function declarator or
  /// a constructor-style initializer, when parsing declaration statements.
  /// Returns true for function declarator and false for constructor-style
  /// initializer. Sets 'IsAmbiguous' to true to indicate that this declaration
  /// might be a constructor-style initializer.
  /// If during the disambiguation process a parsing error is encountered,
  /// the function returns true to let the declaration parsing code handle it.
  bool isCXXFunctionDeclarator(bool *IsAmbiguous = nullptr);
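  // Illustrative case for isCXXFunctionDeclarator (exposition only):
  //   T x(y);  // function 'x' taking a parameter of type 'y', or an object
  //            // 'x' direct-initialized from the expression 'y'?
  // IsAmbiguous is set when both readings remain possible.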
  struct ConditionDeclarationOrInitStatementState;
  enum class ConditionOrInitStatement {
    Expression,    ///< Disambiguated as an expression (either kind).
    ConditionDecl, ///< Disambiguated as the declaration form of condition.
    InitStmtDecl,  ///< Disambiguated as a simple-declaration init-statement.
    ForRangeDecl,  ///< Disambiguated as a for-range declaration.
    Error          ///< Can't be any of the above!
  };
  /// Disambiguates between the different kinds of things that can happen
  /// after 'if (' or 'switch ('. This could be one of two different kinds of
  /// declaration (depending on whether there is a ';' later) or an expression.
  ConditionOrInitStatement
  isCXXConditionDeclarationOrInitStatement(bool CanBeInitStmt,
                                           bool CanBeForRangeDecl);

  bool isCXXTypeId(TentativeCXXTypeIdContext Context, bool &isAmbiguous);
  bool isCXXTypeId(TentativeCXXTypeIdContext Context) {
    bool isAmbiguous;
    return isCXXTypeId(Context, isAmbiguous);
  }

  /// TPResult - Used as the result value for functions whose purpose is to
  /// disambiguate C++ constructs by "tentatively parsing" them.
  enum class TPResult { True, False, Ambiguous, Error };

  /// Determine whether we could have an enum-base.
  ///
  /// \p AllowSemi If \c true, then allow a ';' after the enum-base; otherwise
  /// only consider this to be an enum-base if the next token is a '{'.
  ///
  /// \return \c false if this cannot possibly be an enum base; \c true
  /// otherwise.
  bool isEnumBase(bool AllowSemi);

  /// isCXXDeclarationSpecifier - Returns TPResult::True if it is a
  /// declaration specifier, TPResult::False if it is not,
  /// TPResult::Ambiguous if it could be either a decl-specifier or a
  /// function-style cast, and TPResult::Error if a parsing error was
  /// encountered. If it could be a braced C++11 function-style cast, returns
  /// BracedCastResult.
  /// Doesn't consume tokens.
  TPResult
  isCXXDeclarationSpecifier(TPResult BracedCastResult = TPResult::False,
                            bool *InvalidAsDeclSpec = nullptr);

  /// Given that isCXXDeclarationSpecifier returns \c TPResult::True or
  /// \c TPResult::Ambiguous, determine whether the decl-specifier would be
  /// a type-specifier other than a cv-qualifier.
  bool isCXXDeclarationSpecifierAType();

  /// Determine whether the current token sequence might be
  ///   '<' template-argument-list '>'
  /// rather than a less-than expression.
  TPResult isTemplateArgumentList(unsigned TokensToSkip);

  /// Determine whether an '(' after an 'explicit' keyword is part of a C++20
  /// 'explicit(bool)' declaration, in earlier language modes where that is an
  /// extension.
  TPResult isExplicitBool();

  /// Determine whether an identifier has been tentatively declared as a
  /// non-type. Such tentative declarations should not be found to name a type
  /// during a tentative parse, but also should not be annotated as a non-type.
  bool isTentativelyDeclared(IdentifierInfo *II);
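  // Illustrative case for isTemplateArgumentList (exposition only):
  //   f<a, b>(c);
  // '<' may open a template-argument-list, or the tokens may parse as the
  // comparisons '(f < a), (b > (c))' when 'f' names a plain variable.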
  // "Tentative parsing" functions, used for disambiguation. If a parsing
  // error is encountered they will return TPResult::Error.
  // Returning TPResult::True/False indicates that the ambiguity was
  // resolved and tentative parsing may stop. TPResult::Ambiguous indicates
  // that more tentative parsing is necessary for disambiguation.
  // They all consume tokens, so backtracking should be used after calling
  // them.

  TPResult TryParseSimpleDeclaration(bool AllowForRangeDecl);
  TPResult TryParseTypeofSpecifier();
  TPResult TryParseProtocolQualifiers();
  TPResult TryParsePtrOperatorSeq();
  TPResult TryParseOperatorId();
  TPResult TryParseInitDeclaratorList();
  TPResult TryParseDeclarator(bool mayBeAbstract,
                              bool mayHaveIdentifier = true,
                              bool mayHaveDirectInit = false);
  TPResult
  TryParseParameterDeclarationClause(bool *InvalidAsDeclaration = nullptr,
                                     bool VersusTemplateArg = false);
  TPResult TryParseFunctionDeclarator();
  TPResult TryParseBracketDeclarator();
  TPResult TryConsumeDeclarationSpecifier();

  /// Try to skip a possibly empty sequence of 'attribute-specifier's without
  /// full validation of the syntactic structure of attributes.
  bool TrySkipAttributes();

  /// Diagnoses use of _ExtInt as being deprecated, and diagnoses use of
  /// _BitInt as an extension when appropriate.
  void DiagnoseBitIntUse(const Token &Tok);

public:
  TypeResult
  ParseTypeName(SourceRange *Range = nullptr,
                DeclaratorContext Context = DeclaratorContext::TypeName,
                AccessSpecifier AS = AS_none, Decl **OwnedType = nullptr,
                ParsedAttributes *Attrs = nullptr);

private:
  void ParseBlockId(SourceLocation CaretLoc);

  /// Are [[]] attributes enabled?
  bool standardAttributesAllowed() const {
    const LangOptions &LO = getLangOpts();
    return LO.DoubleSquareBracketAttributes;
  }

  // Check for the start of an attribute-specifier-seq in a context where an
  // attribute is not allowed.
  bool CheckProhibitedCXX11Attribute() {
    assert(Tok.is(tok::l_square));
    if (!standardAttributesAllowed() || NextToken().isNot(tok::l_square))
      return false;
    return DiagnoseProhibitedCXX11Attribute();
  }

  bool DiagnoseProhibitedCXX11Attribute();
  void CheckMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
                                    SourceLocation CorrectLocation) {
    if (!standardAttributesAllowed())
      return;
    if ((Tok.isNot(tok::l_square) || NextToken().isNot(tok::l_square)) &&
        Tok.isNot(tok::kw_alignas))
      return;
    DiagnoseMisplacedCXX11Attribute(Attrs, CorrectLocation);
  }
  void DiagnoseMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
                                       SourceLocation CorrectLocation);

  void stripTypeAttributesOffDeclSpec(ParsedAttributesWithRange &Attrs,
                                      DeclSpec &DS, Sema::TagUseKind TUK);

  // FixItLoc = possible correct location for the attributes
  void ProhibitAttributes(ParsedAttributesWithRange &Attrs,
                          SourceLocation FixItLoc = SourceLocation()) {
    if (Attrs.Range.isInvalid())
      return;
    DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc);
    Attrs.clear();
  }

  void ProhibitAttributes(ParsedAttributesViewWithRange &Attrs,
                          SourceLocation FixItLoc = SourceLocation()) {
    if (Attrs.Range.isInvalid())
      return;
    DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc);
    Attrs.clearListOnly();
  }
  void DiagnoseProhibitedAttributes(const SourceRange &Range,
                                    SourceLocation FixItLoc);

  // Forbid C++11 and C2x attributes that appear on certain syntactic
  // locations which the standard permits but we don't support yet, for
  // example, attributes that appertain to decl specifiers.
  void ProhibitCXX11Attributes(ParsedAttributesWithRange &Attrs,
                               unsigned DiagID,
                               bool DiagnoseEmptyAttrs = false);

  /// Skip C++11 and C2x attributes and return the end location of the
  /// last one.
  /// \returns SourceLocation() if there are no attributes.
  SourceLocation SkipCXX11Attributes();

  /// Diagnose and skip C++11 and C2x attributes that appear in syntactic
  /// locations where attributes are not allowed.
  void DiagnoseAndSkipCXX11Attributes();
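  // Illustrative input (exposition only; exact diagnostics may differ): an
  // attribute splitting the decl-specifiers, e.g.
  //   unsigned [[gnu::unused]] int i;
  // would typically be caught by the misplaced/prohibited attribute checks
  // above, with CorrectLocation pointing before 'unsigned'.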
  /// Emit warnings for C++11 and C2x attributes that are in a position that
  /// clang accepts as an extension.
  void DiagnoseCXX11AttributeExtension(ParsedAttributesWithRange &Attrs);

  /// Parses syntax-generic attribute arguments for attributes which are
  /// known to the implementation, and adds them to the given ParsedAttributes
  /// list with the given attribute syntax. Returns the number of arguments
  /// parsed for the attribute.
  unsigned
  ParseAttributeArgsCommon(IdentifierInfo *AttrName,
                           SourceLocation AttrNameLoc, ParsedAttributes &Attrs,
                           SourceLocation *EndLoc, IdentifierInfo *ScopeName,
                           SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax);

  enum ParseAttrKindMask {
    PAKM_GNU = 1 << 0,
    PAKM_Declspec = 1 << 1,
    PAKM_CXX11 = 1 << 2,
  };

  /// \brief Parse attributes based on what syntaxes are desired, allowing for
  /// the order to vary. e.g. with PAKM_GNU | PAKM_Declspec:
  /// __attribute__((...)) __declspec(...) __attribute__((...))
  /// Note that Microsoft attributes (spelled with single square brackets) are
  /// not supported by this because of parsing ambiguities with other
  /// constructs.
  ///
  /// There are some attribute parse orderings that should not be allowed in
  /// arbitrary order. e.g.,
  ///
  ///   [[]] __attribute__(()) int i; // OK
  ///   __attribute__(()) [[]] int i; // Not OK
  ///
  /// Such situations should use the specific attribute parsing functionality.
  void ParseAttributes(unsigned WhichAttrKinds,
                       ParsedAttributesWithRange &Attrs,
                       LateParsedAttrList *LateAttrs = nullptr);
  void ParseAttributes(unsigned WhichAttrKinds, ParsedAttributes &Attrs,
                       LateParsedAttrList *LateAttrs = nullptr) {
    ParsedAttributesWithRange AttrsWithRange(AttrFactory);
    ParseAttributes(WhichAttrKinds, AttrsWithRange, LateAttrs);
    Attrs.takeAllFrom(AttrsWithRange);
  }

  /// \brief Possibly parse attributes based on what syntaxes are desired,
  /// allowing for the order to vary.
  bool MaybeParseAttributes(unsigned WhichAttrKinds,
                            ParsedAttributesWithRange &Attrs,
                            LateParsedAttrList *LateAttrs = nullptr) {
    if (Tok.isOneOf(tok::kw___attribute, tok::kw___declspec) ||
        (standardAttributesAllowed() && isCXX11AttributeSpecifier())) {
      ParseAttributes(WhichAttrKinds, Attrs, LateAttrs);
      return true;
    }
    return false;
  }
  bool MaybeParseAttributes(unsigned WhichAttrKinds, ParsedAttributes &Attrs,
                            LateParsedAttrList *LateAttrs = nullptr) {
    if (Tok.isOneOf(tok::kw___attribute, tok::kw___declspec) ||
        (standardAttributesAllowed() && isCXX11AttributeSpecifier())) {
      ParseAttributes(WhichAttrKinds, Attrs, LateAttrs);
      return true;
    }
    return false;
  }

  void MaybeParseGNUAttributes(Declarator &D,
                               LateParsedAttrList *LateAttrs = nullptr) {
    if (Tok.is(tok::kw___attribute)) {
      ParsedAttributesWithRange attrs(AttrFactory);
      ParseGNUAttributes(attrs, LateAttrs, &D);
      D.takeAttributes(attrs, attrs.Range.getEnd());
    }
  }

  /// Parses GNU-style attributes and returns them without source range
  /// information.
  ///
  /// This API is discouraged. Use the version that takes a
  /// ParsedAttributesWithRange instead.
  bool MaybeParseGNUAttributes(ParsedAttributes &Attrs,
                               LateParsedAttrList *LateAttrs = nullptr) {
    if (Tok.is(tok::kw___attribute)) {
      // Parse into a ranged list first, then hand the attributes over.
      ParsedAttributesWithRange AttrsWithRange(AttrFactory);
      ParseGNUAttributes(AttrsWithRange, LateAttrs);
      Attrs.takeAllFrom(AttrsWithRange);
      return true;
    }
    return false;
  }

  bool MaybeParseGNUAttributes(ParsedAttributesWithRange &Attrs,
                               LateParsedAttrList *LateAttrs = nullptr) {
    if (Tok.is(tok::kw___attribute)) {
      ParseGNUAttributes(Attrs, LateAttrs);
      return true;
    }
    return false;
  }
  /// Parses GNU-style attributes and returns them without source range
  /// information.
  ///
  /// This API is discouraged. Use the version that takes a
  /// ParsedAttributesWithRange instead.
  void ParseGNUAttributes(ParsedAttributes &Attrs,
                          LateParsedAttrList *LateAttrs = nullptr,
                          Declarator *D = nullptr) {
    ParsedAttributesWithRange AttrsWithRange(AttrFactory);
    ParseGNUAttributes(AttrsWithRange, LateAttrs, D);
    Attrs.takeAllFrom(AttrsWithRange);
  }

  void ParseGNUAttributes(ParsedAttributesWithRange &Attrs,
                          LateParsedAttrList *LateAttrs = nullptr,
                          Declarator *D = nullptr);
  void ParseGNUAttributeArgs(IdentifierInfo *AttrName,
                             SourceLocation AttrNameLoc,
                             ParsedAttributes &Attrs, SourceLocation *EndLoc,
                             IdentifierInfo *ScopeName,
                             SourceLocation ScopeLoc,
                             ParsedAttr::Syntax Syntax, Declarator *D);
  IdentifierLoc *ParseIdentifierLoc();

  unsigned
  ParseClangAttributeArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc,
                          ParsedAttributes &Attrs, SourceLocation *EndLoc,
                          IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
                          ParsedAttr::Syntax Syntax);

  void ReplayOpenMPAttributeTokens(CachedTokens &OpenMPTokens) {
    // If parsing the attributes found an OpenMP directive, emit those tokens
    // to the parse stream now.
    if (!OpenMPTokens.empty()) {
      PP.EnterToken(Tok, /*IsReinject*/ true);
      PP.EnterTokenStream(OpenMPTokens, /*DisableMacroExpansion*/ true,
                          /*IsReinject*/ true);
      ConsumeAnyToken(/*ConsumeCodeCompletionTok*/ true);
    }
  }
  void MaybeParseCXX11Attributes(Declarator &D) {
    if (standardAttributesAllowed() && isCXX11AttributeSpecifier()) {
      ParsedAttributesWithRange attrs(AttrFactory);
      ParseCXX11Attributes(attrs);
      D.takeAttributes(attrs, attrs.Range.getEnd());
    }
  }
  bool MaybeParseCXX11Attributes(ParsedAttributes &attrs) {
    if (standardAttributesAllowed() && isCXX11AttributeSpecifier()) {
      ParsedAttributesWithRange attrsWithRange(AttrFactory);
      ParseCXX11Attributes(attrsWithRange);
      attrs.takeAllFrom(attrsWithRange);
      return true;
    }
    return false;
  }
  bool MaybeParseCXX11Attributes(ParsedAttributesWithRange &attrs,
                                 bool OuterMightBeMessageSend = false) {
    if (standardAttributesAllowed() &&
        isCXX11AttributeSpecifier(false, OuterMightBeMessageSend)) {
      ParseCXX11Attributes(attrs);
      return true;
    }
    return false;
  }

  void ParseOpenMPAttributeArgs(IdentifierInfo *AttrName,
                                CachedTokens &OpenMPTokens);

  void ParseCXX11AttributeSpecifierInternal(ParsedAttributes &Attrs,
                                            CachedTokens &OpenMPTokens,
                                            SourceLocation *EndLoc = nullptr);
  void ParseCXX11AttributeSpecifier(ParsedAttributes &Attrs,
                                    SourceLocation *EndLoc = nullptr) {
    CachedTokens OpenMPTokens;
    ParseCXX11AttributeSpecifierInternal(Attrs, OpenMPTokens, EndLoc);
    ReplayOpenMPAttributeTokens(OpenMPTokens);
  }
  void ParseCXX11Attributes(ParsedAttributesWithRange &attrs);
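  // Illustrative note on ReplayOpenMPAttributeTokens (exposition only):
  // parsing '[[omp::directive(parallel)]]' caches the directive's tokens;
  // replaying re-enters them into the token stream so the ordinary
  // '#pragma omp' parsing path can consume them.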
  /// Parses a C++11 (or C2x)-style attribute argument list. Returns true
  /// if this results in adding an attribute to the ParsedAttributes list.
  bool ParseCXX11AttributeArgs(IdentifierInfo *AttrName,
                               SourceLocation AttrNameLoc,
                               ParsedAttributes &Attrs, SourceLocation *EndLoc,
                               IdentifierInfo *ScopeName,
                               SourceLocation ScopeLoc,
                               CachedTokens &OpenMPTokens);

  IdentifierInfo *TryParseCXX11AttributeIdentifier(
      SourceLocation &Loc,
      Sema::AttributeCompletion Completion = Sema::AttributeCompletion::None,
      const IdentifierInfo *EnclosingScope = nullptr);

  void MaybeParseMicrosoftAttributes(ParsedAttributes &Attrs) {
    if (getLangOpts().MicrosoftExt && Tok.is(tok::l_square)) {
      ParsedAttributesWithRange AttrsWithRange(AttrFactory);
      ParseMicrosoftAttributes(AttrsWithRange);
      Attrs.takeAllFrom(AttrsWithRange);
    }
  }
  void ParseMicrosoftUuidAttributeArgs(ParsedAttributes &Attrs);
  void ParseMicrosoftAttributes(ParsedAttributesWithRange &attrs);
  bool MaybeParseMicrosoftDeclSpecs(ParsedAttributesWithRange &Attrs) {
    if (getLangOpts().DeclSpecKeyword && Tok.is(tok::kw___declspec)) {
      ParseMicrosoftDeclSpecs(Attrs);
      return true;
    }
    return false;
  }
  void ParseMicrosoftDeclSpecs(ParsedAttributesWithRange &Attrs);
  bool ParseMicrosoftDeclSpecArgs(IdentifierInfo *AttrName,
                                  SourceLocation AttrNameLoc,
                                  ParsedAttributes &Attrs);
  void ParseMicrosoftTypeAttributes(ParsedAttributes &attrs);
  void DiagnoseAndSkipExtendedMicrosoftTypeAttributes();
  SourceLocation SkipExtendedMicrosoftTypeAttributes();
  void ParseMicrosoftInheritanceClassAttributes(ParsedAttributes &attrs);
  void ParseBorlandTypeAttributes(ParsedAttributes &attrs);
  void ParseOpenCLKernelAttributes(ParsedAttributes &attrs);
  void ParseOpenCLQualifiers(ParsedAttributes &Attrs);
  void ParseNullabilityTypeSpecifiers(ParsedAttributes &attrs);

  VersionTuple ParseVersionTuple(SourceRange &Range);
  void ParseAvailabilityAttribute(IdentifierInfo &Availability,
                                  SourceLocation AvailabilityLoc,
                                  ParsedAttributes &attrs,
                                  SourceLocation *endLoc,
                                  IdentifierInfo *ScopeName,
                                  SourceLocation ScopeLoc,
                                  ParsedAttr::Syntax Syntax);

  Optional<AvailabilitySpec> ParseAvailabilitySpec();
  ExprResult ParseAvailabilityCheckExpr(SourceLocation StartLoc);

  void ParseExternalSourceSymbolAttribute(IdentifierInfo &ExternalSourceSymbol,
                                          SourceLocation Loc,
                                          ParsedAttributes &Attrs,
                                          SourceLocation *EndLoc,
                                          IdentifierInfo *ScopeName,
                                          SourceLocation ScopeLoc,
                                          ParsedAttr::Syntax Syntax);

  void ParseObjCBridgeRelatedAttribute(IdentifierInfo &ObjCBridgeRelated,
                                       SourceLocation ObjCBridgeRelatedLoc,
                                       ParsedAttributes &attrs,
                                       SourceLocation *endLoc,
                                       IdentifierInfo *ScopeName,
                                       SourceLocation ScopeLoc,
                                       ParsedAttr::Syntax Syntax);

  void ParseSwiftNewTypeAttribute(IdentifierInfo &AttrName,
                                  SourceLocation AttrNameLoc,
                                  ParsedAttributes &Attrs,
                                  SourceLocation *EndLoc,
                                  IdentifierInfo *ScopeName,
                                  SourceLocation ScopeLoc,
                                  ParsedAttr::Syntax Syntax);

  void ParseTypeTagForDatatypeAttribute(IdentifierInfo &AttrName,
                                        SourceLocation AttrNameLoc,
                                        ParsedAttributes &Attrs,
                                        SourceLocation *EndLoc,
                                        IdentifierInfo *ScopeName,
                                        SourceLocation ScopeLoc,
                                        ParsedAttr::Syntax Syntax);

  void ParseAttributeWithTypeArg(IdentifierInfo &AttrName,
                                 SourceLocation AttrNameLoc,
                                 ParsedAttributes &Attrs,
                                 SourceLocation *EndLoc,
                                 IdentifierInfo *ScopeName,
                                 SourceLocation ScopeLoc,
                                 ParsedAttr::Syntax Syntax);

  void ParseTypeofSpecifier(DeclSpec &DS);
  SourceLocation ParseDecltypeSpecifier(DeclSpec &DS);
  void AnnotateExistingDecltypeSpecifier(const DeclSpec &DS,
                                         SourceLocation StartLoc,
                                         SourceLocation EndLoc);
  void ParseUnderlyingTypeSpecifier(DeclSpec &DS);
  void ParseAtomicSpecifier(DeclSpec &DS);

  ExprResult ParseAlignArgument(SourceLocation Start,
                                SourceLocation &EllipsisLoc);
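  // Illustrative input (exposition only) for the availability parsing entry
  // points above:
  //   __attribute__((availability(macos, introduced=10.12, deprecated=11.0)))
  // ParseVersionTuple handles the '10.12' and '11.0' version literals.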
  void ParseAlignmentSpecifier(ParsedAttributes &Attrs,
                               SourceLocation *endLoc = nullptr);
  ExprResult ParseExtIntegerArgument();

  void ParsePtrauthQualifier(ParsedAttributes &Attrs);

  VirtSpecifiers::Specifier isCXX11VirtSpecifier(const Token &Tok) const;
  VirtSpecifiers::Specifier isCXX11VirtSpecifier() const {
    return isCXX11VirtSpecifier(Tok);
  }
  void ParseOptionalCXX11VirtSpecifierSeq(VirtSpecifiers &VS, bool IsInterface,
                                          SourceLocation FriendLoc);

  bool isCXX11FinalKeyword() const;
  bool isClassCompatibleKeyword() const;

  /// DeclaratorScopeObj - RAII object used in Parser::ParseDirectDeclarator to
  /// enter a new C++ declarator scope and exit it when the function is
  /// finished.
  class DeclaratorScopeObj {
    Parser &P;
    CXXScopeSpec &SS;
    bool EnteredScope;
    bool CreatedScope;

  public:
    DeclaratorScopeObj(Parser &p, CXXScopeSpec &ss)
        : P(p), SS(ss), EnteredScope(false), CreatedScope(false) {}

    void EnterDeclaratorScope() {
      assert(!EnteredScope && "Already entered the scope!");
      assert(SS.isSet() && "C++ scope was not set!");

      CreatedScope = true;
      P.EnterScope(0); // Not a decl scope.

      if (!P.Actions.ActOnCXXEnterDeclaratorScope(P.getCurScope(), SS))
        EnteredScope = true;
    }

    ~DeclaratorScopeObj() {
      if (EnteredScope) {
        assert(SS.isSet() && "C++ scope was cleared ?");
        P.Actions.ActOnCXXExitDeclaratorScope(P.getCurScope(), SS);
      }
      if (CreatedScope)
        P.ExitScope();
    }
  };

  /// ParseDeclarator - Parse and verify a newly-initialized declarator.
  void ParseDeclarator(Declarator &D);
  /// A function that parses a variant of direct-declarator.
  typedef void (Parser::*DirectDeclParseFunction)(Declarator &);
  void ParseDeclaratorInternal(Declarator &D,
                               DirectDeclParseFunction DirectDeclParser);

  enum AttrRequirements {
    AR_NoAttributesParsed = 0, ///< No attributes are diagnosed.
    AR_GNUAttributesParsedAndRejected = 1 << 0, ///< Diagnose GNU attributes.
    AR_GNUAttributesParsed = 1 << 1,
    AR_CXX11AttributesParsed = 1 << 2,
    AR_DeclspecAttributesParsed = 1 << 3,
    AR_AllAttributesParsed = AR_GNUAttributesParsed | AR_CXX11AttributesParsed |
                             AR_DeclspecAttributesParsed,
    AR_VendorAttributesParsed =
        AR_GNUAttributesParsed | AR_DeclspecAttributesParsed
  };

  void ParseTypeQualifierListOpt(
      DeclSpec &DS, unsigned AttrReqs = AR_AllAttributesParsed,
      bool AtomicAllowed = true, bool IdentifierRequired = false,
      Optional<llvm::function_ref<void()>> CodeCompletionHandler = None);
  void ParseDirectDeclarator(Declarator &D);
  void ParseDecompositionDeclarator(Declarator &D);
  void ParseParenDeclarator(Declarator &D);
  void ParseFunctionDeclarator(Declarator &D, ParsedAttributes &attrs,
                               BalancedDelimiterTracker &Tracker,
                               bool IsAmbiguous, bool RequiresArg = false);
  void InitCXXThisScopeForDeclaratorIfRelevant(
      const Declarator &D, const DeclSpec &DS,
      llvm::Optional<Sema::CXXThisScopeRAII> &ThisScope);
  bool ParseRefQualifier(bool &RefQualifierIsLValueRef,
                         SourceLocation &RefQualifierLoc);
  bool isFunctionDeclaratorIdentifierList();
  void ParseFunctionDeclaratorIdentifierList(
      Declarator &D, SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo);
  void ParseParameterDeclarationClause(
      DeclaratorContext DeclaratorContext, ParsedAttributes &attrs,
      SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo,
      SourceLocation &EllipsisLoc);

  void ParseBracketDeclarator(Declarator &D);
  void ParseMisplacedBracketDeclarator(Declarator &D);
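  // Illustrative use of DeclaratorScopeObj (exposition only): while parsing
  //   int N::f() { ... }
  // the declarator scope for 'N' is entered so the rest of the declarator is
  // looked up inside N, and the RAII destructor exits the scope afterwards.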
  //===--------------------------------------------------------------------===//
  // C++ 7: Declarations [dcl.dcl]

  /// The kind of attribute specifier we have found.
  enum CXX11AttributeKind {
    /// This is not an attribute specifier.
    CAK_NotAttributeSpecifier,
    /// This should be treated as an attribute-specifier.
    CAK_AttributeSpecifier,
    /// The next tokens are '[[', but this is not an attribute-specifier. This
    /// is ill-formed by C++11 [dcl.attr.grammar]p6.
    CAK_InvalidAttributeSpecifier
  };
  CXX11AttributeKind
  isCXX11AttributeSpecifier(bool Disambiguate = false,
                            bool OuterMightBeMessageSend = false);

  void DiagnoseUnexpectedNamespace(NamedDecl *Context);

  DeclGroupPtrTy ParseNamespace(DeclaratorContext Context,
                                SourceLocation &DeclEnd,
                                SourceLocation InlineLoc = SourceLocation());

  struct InnerNamespaceInfo {
    SourceLocation NamespaceLoc;
    SourceLocation InlineLoc;
    SourceLocation IdentLoc;
    IdentifierInfo *Ident;
  };
  using InnerNamespaceInfoList = llvm::SmallVector<InnerNamespaceInfo, 4>;

  void ParseInnerNamespace(const InnerNamespaceInfoList &InnerNSs,
                           unsigned int index, SourceLocation &InlineLoc,
                           ParsedAttributes &attrs,
                           BalancedDelimiterTracker &Tracker);
  Decl *ParseLinkage(ParsingDeclSpec &DS, DeclaratorContext Context);
  Decl *ParseExportDeclaration();
  DeclGroupPtrTy ParseUsingDirectiveOrDeclaration(
      DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo,
      SourceLocation &DeclEnd, ParsedAttributesWithRange &attrs);
  Decl *ParseUsingDirective(DeclaratorContext Context, SourceLocation UsingLoc,
                            SourceLocation &DeclEnd, ParsedAttributes &attrs);

  struct UsingDeclarator {
    SourceLocation TypenameLoc;
    CXXScopeSpec SS;
    UnqualifiedId Name;
    SourceLocation EllipsisLoc;

    void clear() {
      TypenameLoc = EllipsisLoc = SourceLocation();
      SS.clear();
      Name.clear();
    }
  };

  bool ParseUsingDeclarator(DeclaratorContext Context, UsingDeclarator &D);
  DeclGroupPtrTy ParseUsingDeclaration(DeclaratorContext Context,
                                       const ParsedTemplateInfo &TemplateInfo,
                                       SourceLocation UsingLoc,
                                       SourceLocation &DeclEnd,
                                       ParsedAttributesWithRange &Attrs,
                                       AccessSpecifier AS = AS_none);
  Decl *ParseAliasDeclarationAfterDeclarator(
      const ParsedTemplateInfo &TemplateInfo, SourceLocation UsingLoc,
      UsingDeclarator &D, SourceLocation &DeclEnd, AccessSpecifier AS,
      ParsedAttributes &Attrs, Decl **OwnedType = nullptr);

  Decl *ParseStaticAssertDeclaration(SourceLocation &DeclEnd);
  Decl *ParseNamespaceAlias(SourceLocation NamespaceLoc,
                            SourceLocation AliasLoc, IdentifierInfo *Alias,
                            SourceLocation &DeclEnd);
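  // Illustrative input (exposition only) for ParseInnerNamespace and
  // InnerNamespaceInfoList:
  //   namespace A::B::C { ... }  // C++17 nested form; A, B, and C each
  //                              // become an InnerNamespaceInfo entry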
  //===--------------------------------------------------------------------===//
  // C++ 9: classes [class] and C structs/unions.

  bool isValidAfterTypeSpecifier(bool CouldBeBitfield);
  void ParseClassSpecifier(tok::TokenKind TagTokKind, SourceLocation TagLoc,
                           DeclSpec &DS,
                           const ParsedTemplateInfo &TemplateInfo,
                           AccessSpecifier AS, bool EnteringContext,
                           DeclSpecContext DSC,
                           ParsedAttributesWithRange &Attributes);
  void SkipCXXMemberSpecification(SourceLocation StartLoc,
                                  SourceLocation AttrFixitLoc,
                                  unsigned TagType,
                                  Decl *TagDecl);
  void ParseCXXMemberSpecification(SourceLocation StartLoc,
                                   SourceLocation AttrFixitLoc,
                                   ParsedAttributesWithRange &Attrs,
                                   unsigned TagType,
                                   Decl *TagDecl);
  ExprResult ParseCXXMemberInitializer(Decl *D, bool IsFunction,
                                       SourceLocation &EqualLoc);
  bool ParseCXXMemberDeclaratorBeforeInitializer(Declarator &DeclaratorInfo,
                                                 VirtSpecifiers &VS,
                                                 ExprResult &BitfieldSize,
                                                 LateParsedAttrList &LateAttrs);
  void MaybeParseAndDiagnoseDeclSpecAfterCXX11VirtSpecifierSeq(
      Declarator &D, VirtSpecifiers &VS);
  DeclGroupPtrTy ParseCXXClassMemberDeclaration(
      AccessSpecifier AS, ParsedAttributes &Attr,
      const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
      ParsingDeclRAIIObject *DiagsFromTParams = nullptr);
  DeclGroupPtrTy ParseCXXClassMemberDeclarationWithPragmas(
      AccessSpecifier &AS, ParsedAttributesWithRange &AccessAttrs,
      DeclSpec::TST TagType, Decl *Tag);
  void ParseConstructorInitializer(Decl *ConstructorDecl);
  MemInitResult ParseMemInitializer(Decl *ConstructorDecl);
  void HandleMemberFunctionDeclDelays(Declarator &DeclaratorInfo,
                                      Decl *ThisDecl);

  //===--------------------------------------------------------------------===//
  // C++ 10: Derived classes [class.derived]
  TypeResult ParseBaseTypeSpecifier(SourceLocation &BaseLoc,
                                    SourceLocation &EndLocation);
  void ParseBaseClause(Decl *ClassDecl);
  BaseResult ParseBaseSpecifier(Decl *ClassDecl);
  AccessSpecifier getAccessSpecifierIfPresent() const;

  bool ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS, ParsedType ObjectType,
                                    bool ObjectHadErrors,
                                    SourceLocation TemplateKWLoc,
                                    IdentifierInfo *Name,
                                    SourceLocation NameLoc,
                                    bool EnteringContext, UnqualifiedId &Id,
                                    bool AssumeTemplateId);
  bool ParseUnqualifiedIdOperator(CXXScopeSpec &SS, bool EnteringContext,
                                  ParsedType ObjectType,
                                  UnqualifiedId &Result);

  //===--------------------------------------------------------------------===//
  // OpenMP: Directives and clauses.
  /// Parse clauses for '#pragma omp declare simd'.
  DeclGroupPtrTy ParseOMPDeclareSimdClauses(DeclGroupPtrTy Ptr,
                                            CachedTokens &Toks,
                                            SourceLocation Loc);

  /// Parse a property kind into \p TIProperty for the selector set \p Set and
  /// selector \p Selector.
  void parseOMPTraitPropertyKind(OMPTraitProperty &TIProperty,
                                 llvm::omp::TraitSet Set,
                                 llvm::omp::TraitSelector Selector,
                                 llvm::StringMap<SourceLocation> &Seen);

  /// Parse a selector kind into \p TISelector for the selector set \p Set.
  void parseOMPTraitSelectorKind(OMPTraitSelector &TISelector,
                                 llvm::omp::TraitSet Set,
                                 llvm::StringMap<SourceLocation> &Seen);

  /// Parse a selector set kind into \p TISet.
  void parseOMPTraitSetKind(OMPTraitSet &TISet,
                            llvm::StringMap<SourceLocation> &Seen);

  /// Parses an OpenMP context property.
  void parseOMPContextProperty(OMPTraitSelector &TISelector,
                               llvm::omp::TraitSet Set,
                               llvm::StringMap<SourceLocation> &Seen);

  /// Parses an OpenMP context selector.
  void parseOMPContextSelector(OMPTraitSelector &TISelector,
                               llvm::omp::TraitSet Set,
                               llvm::StringMap<SourceLocation> &SeenSelectors);

  /// Parses an OpenMP context selector set.
  void parseOMPContextSelectorSet(OMPTraitSet &TISet,
                                  llvm::StringMap<SourceLocation> &SeenSets);
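  // Illustrative OpenMP context selector (exposition only) for the
  // parseOMPTrait* helpers above:
  //   match(construct = {target, parallel}, device = {kind(gpu)})
  // 'construct' and 'device' are trait sets, 'kind' a selector, and 'gpu'
  // a property.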
  /// Parses OpenMP context selectors.
  bool parseOMPContextSelectors(SourceLocation Loc, OMPTraitInfo &TI);

  /// Parse an 'append_args' clause for '#pragma omp declare variant'.
  bool parseOpenMPAppendArgs(
      SmallVectorImpl<OMPDeclareVariantAttr::InteropType> &InterOpTypes);

  /// Parse a `match` clause for an '#pragma omp declare variant'. Return true
  /// if there was an error.
  bool parseOMPDeclareVariantMatchClause(SourceLocation Loc, OMPTraitInfo &TI,
                                         OMPTraitInfo *ParentTI);

  /// Parse clauses for '#pragma omp declare variant'.
  void ParseOMPDeclareVariantClauses(DeclGroupPtrTy Ptr, CachedTokens &Toks,
                                     SourceLocation Loc);

  /// Parse 'omp [begin] assume[s]' directive.
  void ParseOpenMPAssumesDirective(OpenMPDirectiveKind DKind,
                                   SourceLocation Loc);

  /// Parse 'omp end assumes' directive.
  void ParseOpenMPEndAssumesDirective(SourceLocation Loc);

  /// Parse clauses for '#pragma omp [begin] declare target'.
  void ParseOMPDeclareTargetClauses(Sema::DeclareTargetContextInfo &DTCI);

  /// Parse '#pragma omp end declare target'.
  void ParseOMPEndDeclareTargetDirective(OpenMPDirectiveKind BeginDKind,
                                         OpenMPDirectiveKind EndDKind,
                                         SourceLocation Loc);

  /// Skip tokens until an `annot_pragma_openmp_end` is found. Emit a warning
  /// if it is not the current token.
  void skipUntilPragmaOpenMPEnd(OpenMPDirectiveKind DKind);

  /// Check the \p FoundKind against the \p ExpectedKind, if not issue an
  /// error that the "end" matching the "begin" directive of kind \p BeginKind
  /// was not found. Finally, if the expected kind was found or if
  /// \p SkipUntilOpenMPEnd is set, skip ahead using the helper
  /// `skipUntilPragmaOpenMPEnd`.
  void parseOMPEndDirective(OpenMPDirectiveKind BeginKind,
                            OpenMPDirectiveKind ExpectedKind,
                            OpenMPDirectiveKind FoundKind,
                            SourceLocation MatchingLoc,
                            SourceLocation FoundLoc,
                            bool SkipUntilOpenMPEnd);

  /// Parses declarative OpenMP directives.
  DeclGroupPtrTy ParseOpenMPDeclarativeDirectiveWithExtDecl(
      AccessSpecifier &AS, ParsedAttributesWithRange &Attrs,
      bool Delayed = false, DeclSpec::TST TagType = DeclSpec::TST_unspecified,
      Decl *TagDecl = nullptr);

  /// Parse 'omp declare reduction' construct.
  DeclGroupPtrTy ParseOpenMPDeclareReductionDirective(AccessSpecifier AS);
  /// Parses initializer for provided omp_priv declaration inside the
  /// reduction initializer.
  void ParseOpenMPReductionInitializerForDecl(VarDecl *OmpPrivParm);

  /// Parses 'omp declare mapper' directive.
  DeclGroupPtrTy ParseOpenMPDeclareMapperDirective(AccessSpecifier AS);
  /// Parses variable declaration in 'omp declare mapper' directive.
  TypeResult parseOpenMPDeclareMapperVarDecl(SourceRange &Range,
                                             DeclarationName &Name,
                                             AccessSpecifier AS = AS_none);

  /// Tries to parse cast part of OpenMP array shaping operation:
  /// '[' expression ']' { '[' expression ']' } ')'.
  bool tryParseOpenMPArrayShapingCastPart();

  /// Parses simple list of variables.
  ///
  /// \param Kind Kind of the directive.
  /// \param Callback Callback function to be called for the list elements.
  /// \param AllowScopeSpecifier true, if the variables can have fully
  /// qualified names.
  ///
  bool ParseOpenMPSimpleVarList(
      OpenMPDirectiveKind Kind,
      const llvm::function_ref<void(CXXScopeSpec &, DeclarationNameInfo)>
          &Callback,
      bool AllowScopeSpecifier);
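  // Illustrative directive (exposition only) for
  // parseOMPDeclareVariantMatchClause:
  //   #pragma omp declare variant(f_gpu) match(device = {kind(gpu)})
  //   void f();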
  /// Parses declarative or executable directive.
  ///
  /// \param StmtCtx The context in which we're parsing the directive.
  /// \param ReadDirectiveWithinMetadirective true if directive is within a
  /// metadirective and therefore ends on the closing paren.
  StmtResult ParseOpenMPDeclarativeOrExecutableDirective(
      ParsedStmtContext StmtCtx, bool ReadDirectiveWithinMetadirective = false);

  /// Parses clause of kind \a CKind for directive of a kind \a Kind.
  ///
  /// \param DKind Kind of current directive.
  /// \param CKind Kind of current clause.
  /// \param FirstClause true, if this is the first clause of a kind \a CKind
  /// in current directive.
  ///
  OMPClause *ParseOpenMPClause(OpenMPDirectiveKind DKind,
                               OpenMPClauseKind CKind, bool FirstClause);
  /// Parses clause with a single expression of a kind \a Kind.
  ///
  /// \param Kind Kind of current clause.
  /// \param ParseOnly true to skip the clause's semantic actions and return
  /// nullptr.
  ///
  OMPClause *ParseOpenMPSingleExprClause(OpenMPClauseKind Kind,
                                         bool ParseOnly);
  /// Parses simple clause of a kind \a Kind.
  ///
  /// \param Kind Kind of current clause.
  /// \param ParseOnly true to skip the clause's semantic actions and return
  /// nullptr.
  ///
  OMPClause *ParseOpenMPSimpleClause(OpenMPClauseKind Kind, bool ParseOnly);
  /// Parses indirect clause.
  /// \param ParseOnly true to skip the clause's semantic actions and return
  /// false.
  bool ParseOpenMPIndirectClause(Sema::DeclareTargetContextInfo &DTCI,
                                 bool ParseOnly);
  /// Parses clause with a single expression and an additional argument
  /// of a kind \a Kind.
  ///
  /// \param DKind Directive kind.
  /// \param Kind Kind of current clause.
  /// \param ParseOnly true to skip the clause's semantic actions and return
  /// nullptr.
  ///
  OMPClause *ParseOpenMPSingleExprWithArgClause(OpenMPDirectiveKind DKind,
                                                OpenMPClauseKind Kind,
                                                bool ParseOnly);

  /// Parses the 'sizes' clause of a '#pragma omp tile' directive.
  OMPClause *ParseOpenMPSizesClause();

  /// Parses clause without any additional arguments.
  ///
  /// \param Kind Kind of current clause.
  /// \param ParseOnly true to skip the clause's semantic actions and return
  /// nullptr.
  ///
  OMPClause *ParseOpenMPClause(OpenMPClauseKind Kind, bool ParseOnly = false);
  /// Parses clause with the list of variables of a kind \a Kind.
  ///
  /// \param Kind Kind of current clause.
  /// \param ParseOnly true to skip the clause's semantic actions and return
  /// nullptr.
  ///
  OMPClause *ParseOpenMPVarListClause(OpenMPDirectiveKind DKind,
                                      OpenMPClauseKind Kind, bool ParseOnly);

  /// Parses and creates OpenMP 5.0 iterators expression:
  /// <iterators> = 'iterator' '(' { [ <iterator-type> ] identifier =
  /// <range-specification> }+ ')'
  ExprResult ParseOpenMPIteratorsExpr();

  /// Parses allocators and traits in the context of the uses_allocator
  /// clause. Expected format:
  /// '(' { <allocator> [ '(' <allocator_traits> ')' ] }+ ')'
  OMPClause *ParseOpenMPUsesAllocatorClause(OpenMPDirectiveKind DKind);

  /// Parses clause with an interop variable of kind \a Kind.
  ///
  /// \param Kind Kind of current clause.
  /// \param ParseOnly true to skip the clause's semantic actions and return
  /// nullptr.
  ///
  OMPClause *ParseOpenMPInteropClause(OpenMPClauseKind Kind, bool ParseOnly);

public:
  /// Parses simple expression in parens for single-expression clauses of
  /// OpenMP constructs.
  /// \param RLoc Returned location of right paren.
  ExprResult ParseOpenMPParensExpr(StringRef ClauseName, SourceLocation &RLoc,
                                   bool IsAddressOfOperand = false);
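  // Illustrative OpenMP 5.0 iterators expression (exposition only) as parsed
  // by ParseOpenMPIteratorsExpr:
  //   depend(iterator(i = 0 : n), in : a[i])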
struct OpenMPVarListDataTy { Expr *DepModOrTailExpr = nullptr; SourceLocation ColonLoc; SourceLocation RLoc; CXXScopeSpec ReductionOrMapperIdScopeSpec; DeclarationNameInfo ReductionOrMapperId; int ExtraModifier = -1; ///< Additional modifier for linear, map, depend or ///< lastprivate clause. SmallVector<OpenMPMapModifierKind, NumberOfOMPMapClauseModifiers> MapTypeModifiers; SmallVector<SourceLocation, NumberOfOMPMapClauseModifiers> MapTypeModifiersLoc; SmallVector<OpenMPMotionModifierKind, NumberOfOMPMotionModifiers> MotionModifiers; SmallVector<SourceLocation, NumberOfOMPMotionModifiers> MotionModifiersLoc; bool IsMapTypeImplicit = false; SourceLocation ExtraModifierLoc; }; /// Parses clauses with list. bool ParseOpenMPVarList(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind, SmallVectorImpl<Expr *> &Vars, OpenMPVarListDataTy &Data); bool ParseUnqualifiedId(CXXScopeSpec &SS, ParsedType ObjectType, bool ObjectHadErrors, bool EnteringContext, bool AllowDestructorName, bool AllowConstructorName, bool AllowDeductionGuide, SourceLocation *TemplateKWLoc, UnqualifiedId &Result); /// Parses the mapper modifier in map, to, and from clauses. bool parseMapperModifier(OpenMPVarListDataTy &Data); /// Parses map-type-modifiers in map clause. /// map([ [map-type-modifier[,] [map-type-modifier[,] ...] map-type : ] list) /// where, map-type-modifier ::= always | close | mapper(mapper-identifier) bool parseMapTypeModifiers(OpenMPVarListDataTy &Data); private: //===--------------------------------------------------------------------===// // C++ 14: Templates [temp] // C++ 14.1: Template Parameters [temp.param] Decl *ParseDeclarationStartingWithTemplate(DeclaratorContext Context, SourceLocation &DeclEnd, ParsedAttributes &AccessAttrs, AccessSpecifier AS = AS_none); Decl *ParseTemplateDeclarationOrSpecialization(DeclaratorContext Context, SourceLocation &DeclEnd, ParsedAttributes &AccessAttrs, AccessSpecifier AS); Decl *ParseSingleDeclarationAfterTemplate( DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo, ParsingDeclRAIIObject &DiagsFromParams, SourceLocation &DeclEnd, ParsedAttributes &AccessAttrs, AccessSpecifier AS = AS_none); bool ParseTemplateParameters(MultiParseScope &TemplateScopes, unsigned Depth, SmallVectorImpl<NamedDecl *> &TemplateParams, SourceLocation &LAngleLoc, SourceLocation &RAngleLoc); bool ParseTemplateParameterList(unsigned Depth, SmallVectorImpl<NamedDecl*> &TemplateParams); TPResult isStartOfTemplateTypeParameter(); NamedDecl *ParseTemplateParameter(unsigned Depth, unsigned Position); NamedDecl *ParseTypeParameter(unsigned Depth, unsigned Position); NamedDecl *ParseTemplateTemplateParameter(unsigned Depth, unsigned Position); NamedDecl *ParseNonTypeTemplateParameter(unsigned Depth, unsigned Position); bool isTypeConstraintAnnotation(); bool TryAnnotateTypeConstraint(); void DiagnoseMisplacedEllipsis(SourceLocation EllipsisLoc, SourceLocation CorrectLoc, bool AlreadyHasEllipsis, bool IdentifierHasName); void DiagnoseMisplacedEllipsisInDeclarator(SourceLocation EllipsisLoc, Declarator &D); // C++ 14.3: Template arguments [temp.arg] typedef SmallVector<ParsedTemplateArgument, 16> TemplateArgList; bool ParseGreaterThanInTemplateList(SourceLocation LAngleLoc, SourceLocation &RAngleLoc, bool ConsumeLastToken, bool ObjCGenericList); bool ParseTemplateIdAfterTemplateName(bool ConsumeLastToken, SourceLocation &LAngleLoc, TemplateArgList &TemplateArgs, SourceLocation &RAngleLoc, TemplateTy NameHint = nullptr); bool AnnotateTemplateIdToken(TemplateTy Template, TemplateNameKind 
TNK, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &TemplateName, bool AllowTypeAnnotation = true, bool TypeConstraint = false); void AnnotateTemplateIdTokenAsType(CXXScopeSpec &SS, bool IsClassName = false); bool ParseTemplateArgumentList(TemplateArgList &TemplateArgs, TemplateTy Template, SourceLocation OpenLoc); ParsedTemplateArgument ParseTemplateTemplateArgument(); ParsedTemplateArgument ParseTemplateArgument(); Decl *ParseExplicitInstantiation(DeclaratorContext Context, SourceLocation ExternLoc, SourceLocation TemplateLoc, SourceLocation &DeclEnd, ParsedAttributes &AccessAttrs, AccessSpecifier AS = AS_none); // C++2a: Template, concept definition [temp] Decl * ParseConceptDefinition(const ParsedTemplateInfo &TemplateInfo, SourceLocation &DeclEnd); //===--------------------------------------------------------------------===// // Modules DeclGroupPtrTy ParseModuleDecl(Sema::ModuleImportState &ImportState); Decl *ParseModuleImport(SourceLocation AtLoc, Sema::ModuleImportState &ImportState); bool parseMisplacedModuleImport(); bool tryParseMisplacedModuleImport() { tok::TokenKind Kind = Tok.getKind(); if (Kind == tok::annot_module_begin || Kind == tok::annot_module_end || Kind == tok::annot_module_include) return parseMisplacedModuleImport(); return false; } bool ParseModuleName( SourceLocation UseLoc, SmallVectorImpl<std::pair<IdentifierInfo *, SourceLocation>> &Path, bool IsImport); //===--------------------------------------------------------------------===// // C++11/G++: Type Traits [Type-Traits.html in the GCC manual] ExprResult ParseTypeTrait(); /// Parse the given string as a type. /// /// This is a dangerous utility function currently employed only by API notes. /// It is not a general entry-point for safely parsing types from strings. /// /// \param typeStr The string to be parsed as a type. /// \param context The name of the context in which this string is being /// parsed, which will be used in diagnostics. /// \param includeLoc The location at which this parse was triggered. 
TypeResult parseTypeFromString(StringRef typeStr, StringRef context, SourceLocation includeLoc); //===--------------------------------------------------------------------===// // Embarcadero: Array and Expression Traits ExprResult ParseArrayTypeTrait(); ExprResult ParseExpressionTrait(); ExprResult ParseBuiltinPtrauthTypeDiscriminator(); //===--------------------------------------------------------------------===// // Preprocessor code-completion pass-through void CodeCompleteDirective(bool InConditional) override; void CodeCompleteInConditionalExclusion() override; void CodeCompleteMacroName(bool IsDefinition) override; void CodeCompletePreprocessorExpression() override; void CodeCompleteMacroArgument(IdentifierInfo *Macro, MacroInfo *MacroInfo, unsigned ArgumentIndex) override; void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled) override; void CodeCompleteNaturalLanguage() override; class GNUAsmQualifiers { unsigned Qualifiers = AQ_unspecified; public: enum AQ { AQ_unspecified = 0, AQ_volatile = 1, AQ_inline = 2, AQ_goto = 4, }; static const char *getQualifierName(AQ Qualifier); bool setAsmQualifier(AQ Qualifier); inline bool isVolatile() const { return Qualifiers & AQ_volatile; } inline bool isInline() const { return Qualifiers & AQ_inline; } inline bool isGoto() const { return Qualifiers & AQ_goto; } }; bool isGCCAsmStatement(const Token &TokAfterAsm) const; bool isGNUAsmQualifier(const Token &TokAfterAsm) const; GNUAsmQualifiers::AQ getGNUAsmQualifier(const Token &Tok) const; bool parseGNUAsmQualifierListOpt(GNUAsmQualifiers &AQ); }; } // end namespace clang #endif
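// A minimal standalone sketch (not from Clang) of the bitmask pattern that
// GNUAsmQualifiers above relies on: each asm qualifier is a power-of-two flag
// OR'ed into a single unsigned, so presence tests and duplicate detection are
// one AND. The convention that set() returns true on a duplicate is an
// assumption made here for illustration, not Clang's documented contract.
#include <cassert>

struct AsmQualifierSketch {
  enum AQ { AQ_unspecified = 0, AQ_volatile = 1, AQ_inline = 2, AQ_goto = 4 };
  unsigned Qualifiers = AQ_unspecified;

  // Returns true if the qualifier was already set (likely a parse error).
  bool set(AQ Q) {
    if (Qualifiers & Q)
      return true;
    Qualifiers |= Q;
    return false;
  }
  bool isVolatile() const { return Qualifiers & AQ_volatile; }
};

int main() {
  AsmQualifierSketch S;
  assert(!S.set(AsmQualifierSketch::AQ_volatile)); // first occurrence: ok
  assert(S.set(AsmQualifierSketch::AQ_volatile));  // duplicate is flagged
  assert(S.isVolatile());
  return 0;
}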
triplet_grid.c
/* Copyright (C) 2015 Atsushi Togo */ /* All rights reserved. */ /* These codes were originally parts of spglib, but only developed */ /* and used for phono3py. Therefore these were moved from spglib to */ /* phono3py. This file is part of phonopy. */ /* Redistribution and use in source and binary forms, with or without */ /* modification, are permitted provided that the following conditions */ /* are met: */ /* * Redistributions of source code must retain the above copyright */ /* notice, this list of conditions and the following disclaimer. */ /* * Redistributions in binary form must reproduce the above copyright */ /* notice, this list of conditions and the following disclaimer in */ /* the documentation and/or other materials provided with the */ /* distribution. */ /* * Neither the name of the phonopy project nor the names of its */ /* contributors may be used to endorse or promote products derived */ /* from this software without specific prior written permission. */ /* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */ /* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */ /* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS */ /* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE */ /* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ /* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, */ /* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; */ /* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER */ /* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT */ /* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */ /* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ /* POSSIBILITY OF SUCH DAMAGE.
*/ #include <stddef.h> #include <stdlib.h> #include "bzgrid.h" #include "grgrid.h" #include "lagrid.h" #include "triplet.h" #include "triplet_grid.h" static long get_ir_triplets_at_q(long *map_triplets, long *map_q, const long grid_point, const long D_diag[3], const RotMats * rot_reciprocal, const long swappable); static long get_ir_triplets_at_q_perm_q1q2(long *map_triplets, const long *map_q, const long grid_point, const long D_diag[3], const RotMats * rot_reciprocal_q, const long num_ir_q); static long get_ir_triplets_at_q_noperm(long *map_triplets, const long *map_q, const long grid_point, const long D_diag[3], const RotMats * rot_reciprocal_q); static long get_BZ_triplets_at_q(long (*triplets)[3], const long grid_point, const ConstBZGrid *bzgrid, const long *map_triplets); static void get_BZ_triplets_at_q_type1(long (*triplets)[3], const long grid_point, const ConstBZGrid *bzgrid, const long *ir_q1_gps, const long num_ir); static void get_BZ_triplets_at_q_type2(long (*triplets)[3], const long grid_point, const ConstBZGrid *bzgrid, const long *ir_q1_gps, const long num_ir); static double get_squared_distance(const long G[3], const double LQD_inv[3][3]); static void get_LQD_inv(double LQD_inv[3][3], const ConstBZGrid *bzgrid); static RotMats *get_point_group_reciprocal_with_q(const RotMats * rot_reciprocal, const long D_diag[3], const long grid_point); long tpk_get_ir_triplets_at_q(long *map_triplets, long *map_q, const long grid_point, const long D_diag[3], const long is_time_reversal, const long (*rec_rotations_in)[3][3], const long num_rot, const long swappable) { long i, num_ir; RotMats *rec_rotations, *rotations; rec_rotations = bzg_alloc_RotMats(num_rot); for (i = 0; i < num_rot; i++) { lagmat_copy_matrix_l3(rec_rotations->mat[i], rec_rotations_in[i]); } rotations = bzg_get_point_group_reciprocal(rec_rotations, is_time_reversal); bzg_free_RotMats(rec_rotations); num_ir = get_ir_triplets_at_q(map_triplets, map_q, grid_point, D_diag, rotations, swappable); bzg_free_RotMats(rotations); return num_ir; } long tpk_get_BZ_triplets_at_q(long (*triplets)[3], const long grid_point, const ConstBZGrid *bzgrid, const long *map_triplets) { return get_BZ_triplets_at_q(triplets, grid_point, bzgrid, map_triplets); } static long get_ir_triplets_at_q(long *map_triplets, long *map_q, const long grid_point, const long D_diag[3], const RotMats * rot_reciprocal, const long swappable) { long i, num_ir_q, num_ir_triplets; long PS[3]; RotMats *rot_reciprocal_q; rot_reciprocal_q = NULL; for (i = 0; i < 3; i++) { PS[i] = 0; } /* Search irreducible q-points (map_q) with a stabilizer. 
*/ rot_reciprocal_q = get_point_group_reciprocal_with_q(rot_reciprocal, D_diag, grid_point); num_ir_q = bzg_get_ir_grid_map(map_q, D_diag, PS, rot_reciprocal_q); if (swappable) { num_ir_triplets = get_ir_triplets_at_q_perm_q1q2(map_triplets, map_q, grid_point, D_diag, rot_reciprocal_q, num_ir_q); } else { num_ir_triplets = get_ir_triplets_at_q_noperm(map_triplets, map_q, grid_point, D_diag, rot_reciprocal_q); } bzg_free_RotMats(rot_reciprocal_q); rot_reciprocal_q = NULL; return num_ir_triplets; } static long get_ir_triplets_at_q_perm_q1q2(long *map_triplets, const long *map_q, const long grid_point, const long D_diag[3], const RotMats * rot_reciprocal_q, const long num_ir_q) { long i, j, num_grid, num_ir_triplets, ir_gp, count; long adrs0[3], adrs1[3], adrs2[3]; long *ir_gps_at_q, *q_2; ir_gps_at_q = NULL; q_2 = NULL; num_ir_triplets = 0; num_grid = D_diag[0] * D_diag[1] * D_diag[2]; if ((q_2 = (long*) malloc(sizeof(long) * num_ir_q)) == NULL) { warning_print("Memory could not be allocated."); goto ret; } if ((ir_gps_at_q = (long*) malloc(sizeof(long) * num_ir_q)) == NULL) { warning_print("Memory could not be allocated."); goto ret; } count = 0; for (i = 0; i < num_grid; i++) { if (map_q[i] == i) { ir_gps_at_q[count] = i; count++; } } grg_get_grid_address_from_index(adrs0, grid_point, D_diag); #pragma omp parallel for private(j, adrs1, adrs2) for (i = 0; i < num_ir_q; i++) { grg_get_grid_address_from_index(adrs1, ir_gps_at_q[i], D_diag); for (j = 0; j < 3; j++) { /* q'' */ adrs2[j] = - adrs0[j] - adrs1[j]; } q_2[i] = grg_get_grid_index(adrs2, D_diag); } /* map_q[q_2[i]] is in ir_gps_at_q. */ /* If map_q[q_2[i]] < ir_gps_at_q[i], this should be already */ /* stored. So the counter is not incremented. */ for (i = 0; i < num_ir_q; i++) { ir_gp = ir_gps_at_q[i]; if (map_q[q_2[i]] < ir_gp) { map_triplets[ir_gp] = map_q[q_2[i]]; } else { map_triplets[ir_gp] = ir_gp; num_ir_triplets++; } } #pragma omp parallel for for (i = 0; i < num_grid; i++) { map_triplets[i] = map_triplets[map_q[i]]; } ret: if (q_2) { free(q_2); q_2 = NULL; } if (ir_gps_at_q) { free(ir_gps_at_q); ir_gps_at_q = NULL; } return num_ir_triplets; } static long get_ir_triplets_at_q_noperm(long *map_triplets, const long *map_q, const long grid_point, const long D_diag[3], const RotMats * rot_reciprocal_q) { long i, num_grid, num_ir_triplets; num_ir_triplets = 0; num_grid = D_diag[0] * D_diag[1] * D_diag[2]; for (i = 0; i < num_grid; i++) { if (map_q[i] == i) { map_triplets[i] = i; num_ir_triplets++; } else { map_triplets[i] = map_triplets[map_q[i]]; } } return num_ir_triplets; } static long get_BZ_triplets_at_q(long (*triplets)[3], const long grid_point, const ConstBZGrid *bzgrid, const long *map_triplets) { long i, num_ir; long *ir_q1_gps; ir_q1_gps = NULL; num_ir = 0; if ((ir_q1_gps = (long*) malloc(sizeof(long) * bzgrid->size)) == NULL) { warning_print("Memory could not be allocated."); goto ret; } for (i = 0; i < bzgrid->size; i++) { if (map_triplets[i] == i) { ir_q1_gps[num_ir] = i; num_ir++; } } if (bzgrid->type == 1) { get_BZ_triplets_at_q_type1(triplets, grid_point, bzgrid, ir_q1_gps, num_ir); } else { get_BZ_triplets_at_q_type2(triplets, grid_point, bzgrid, ir_q1_gps, num_ir); } free(ir_q1_gps); ir_q1_gps = NULL; ret: return num_ir; } static void get_BZ_triplets_at_q_type1(long (*triplets)[3], const long grid_point, const ConstBZGrid *bzgrid, const long *ir_q1_gps, const long num_ir) { long i, j, gp2, num_gp, num_bzgp, bz0, bz1, bz2; long bzgp[3], G[3]; long bz_adrs0[3], bz_adrs1[3], bz_adrs2[3]; const long *gp_map; const 
long (*bz_adrs)[3]; double d2, min_d2, tolerance; double LQD_inv[3][3]; gp_map = bzgrid->gp_map; bz_adrs = bzgrid->addresses; get_LQD_inv(LQD_inv, bzgrid); /* This tolerance is used to be consistent with the BZ reduction in bzgrid. */ tolerance = bzg_get_tolerance_for_BZ_reduction((BZGrid*)bzgrid); for (i = 0; i < 3; i++) { bz_adrs0[i] = bz_adrs[grid_point][i]; } num_gp = bzgrid->D_diag[0] * bzgrid->D_diag[1] * bzgrid->D_diag[2]; num_bzgp = num_gp * 8; #pragma omp parallel for private(j, gp2, bzgp, G, bz_adrs1, bz_adrs2, d2, min_d2, bz0, bz1, bz2) for (i = 0; i < num_ir; i++) { for (j = 0; j < 3; j++) { bz_adrs1[j] = bz_adrs[ir_q1_gps[i]][j]; bz_adrs2[j] = - bz_adrs0[j] - bz_adrs1[j]; } gp2 = grg_get_grid_index(bz_adrs2, bzgrid->D_diag); /* Negative value is the signal to initialize min_d2 later. */ min_d2 = -1; for (bz0 = 0; bz0 < gp_map[num_bzgp + grid_point + 1] - gp_map[num_bzgp + grid_point] + 1; bz0++) { if (bz0 == 0) { bzgp[0] = grid_point; } else { bzgp[0] = num_gp + gp_map[num_bzgp + grid_point] + bz0 - 1; } for (bz1 = 0; bz1 < gp_map[num_bzgp + ir_q1_gps[i] + 1] - gp_map[num_bzgp + ir_q1_gps[i]] + 1; bz1++) { if (bz1 == 0) { bzgp[1] = ir_q1_gps[i]; } else { bzgp[1] = num_gp + gp_map[num_bzgp + ir_q1_gps[i]] + bz1 - 1; } for (bz2 = 0; bz2 < gp_map[num_bzgp + gp2 + 1] - gp_map[num_bzgp + gp2] + 1; bz2++) { if (bz2 == 0) { bzgp[2] = gp2; } else { bzgp[2] = num_gp + gp_map[num_bzgp + gp2] + bz2 - 1; } /* Sum the BZ addresses of the three grid points (bzgp[]), not the loop counters. */ for (j = 0; j < 3; j++) { G[j] = bz_adrs[bzgp[0]][j] + bz_adrs[bzgp[1]][j] + bz_adrs[bzgp[2]][j]; } if (G[0] == 0 && G[1] == 0 && G[2] == 0) { for (j = 0; j < 3; j++) { triplets[i][j] = bzgp[j]; } goto found; } d2 = get_squared_distance(G, LQD_inv); if (d2 < min_d2 - tolerance || min_d2 < 0) { min_d2 = d2; for (j = 0; j < 3; j++) { triplets[i][j] = bzgp[j]; } } } } } found: ; } } static void get_BZ_triplets_at_q_type2(long (*triplets)[3], const long grid_point, const ConstBZGrid *bzgrid, const long *ir_q1_gps, const long num_ir) { long i, j, gp0, gp2; long bzgp[3], G[3]; long bz_adrs0[3], bz_adrs1[3], bz_adrs2[3]; const long *gp_map; const long (*bz_adrs)[3]; double d2, min_d2, tolerance; double LQD_inv[3][3]; gp_map = bzgrid->gp_map; bz_adrs = bzgrid->addresses; get_LQD_inv(LQD_inv, bzgrid); /* This tolerance is used to be consistent with the BZ reduction in bzgrid. */ tolerance = bzg_get_tolerance_for_BZ_reduction((BZGrid*)bzgrid); for (i = 0; i < 3; i++) { bz_adrs0[i] = bz_adrs[grid_point][i]; } gp0 = grg_get_grid_index(bz_adrs0, bzgrid->D_diag); #pragma omp parallel for private(j, gp2, bzgp, G, bz_adrs1, bz_adrs2, d2, min_d2) for (i = 0; i < num_ir; i++) { for (j = 0; j < 3; j++) { bz_adrs1[j] = bz_adrs[gp_map[ir_q1_gps[i]]][j]; bz_adrs2[j] = - bz_adrs0[j] - bz_adrs1[j]; } gp2 = grg_get_grid_index(bz_adrs2, bzgrid->D_diag); /* Negative value is the signal to initialize min_d2 later. */ min_d2 = -1; for (bzgp[0] = gp_map[gp0]; bzgp[0] < gp_map[gp0 + 1]; bzgp[0]++) { for (bzgp[1] = gp_map[ir_q1_gps[i]]; bzgp[1] < gp_map[ir_q1_gps[i] + 1]; bzgp[1]++) { for (bzgp[2] = gp_map[gp2]; bzgp[2] < gp_map[gp2 + 1]; bzgp[2]++) { for (j = 0; j < 3; j++) { G[j] = bz_adrs[bzgp[0]][j] + bz_adrs[bzgp[1]][j] + bz_adrs[bzgp[2]][j]; } if (G[0] == 0 && G[1] == 0 && G[2] == 0) { for (j = 0; j < 3; j++) { triplets[i][j] = bzgp[j]; } goto found; } d2 = get_squared_distance(G, LQD_inv); if (d2 < min_d2 - tolerance || min_d2 < 0) { min_d2 = d2; for (j = 0; j < 3; j++) { triplets[i][j] = bzgp[j]; } } } } } found: ; } } static double get_squared_distance(const long G[3], const double LQD_inv[3][3]) { double d, d2; long i; d2 = 0; for (i = 0; i < 3; i++) { d = LQD_inv[i][0] * G[0] + LQD_inv[i][1] * G[1] + LQD_inv[i][2] * G[2]; d2 += d * d; } return d2; } static void get_LQD_inv(double LQD_inv[3][3], const ConstBZGrid *bzgrid) { long i, j, k; /* LQD^-1: accumulate the matrix product over j rather than overwriting */ for (i = 0; i < 3; i++) { for (k = 0; k < 3; k++) { LQD_inv[i][k] = 0; } } for (i = 0; i < 3; i++) { for (j = 0; j < 3; j++) { for (k = 0; k < 3; k++) { LQD_inv[i][k] += bzgrid->reclat[i][j] * bzgrid->Q[j][k] / bzgrid->D_diag[k]; } } } } /* Return NULL if failed */ static RotMats *get_point_group_reciprocal_with_q(const RotMats * rot_reciprocal, const long D_diag[3], const long grid_point) { long i, num_rot, gp_rot; long *ir_rot; long adrs[3], adrs_rot[3]; RotMats * rot_reciprocal_q; ir_rot = NULL; rot_reciprocal_q = NULL; num_rot = 0; grg_get_grid_address_from_index(adrs, grid_point, D_diag); if ((ir_rot = (long*)malloc(sizeof(long) * rot_reciprocal->size)) == NULL) { warning_print("Memory of ir_rot could not be allocated."); return NULL; } for (i = 0; i < rot_reciprocal->size; i++) { ir_rot[i] = -1; } for (i = 0; i < rot_reciprocal->size; i++) { lagmat_multiply_matrix_vector_l3(adrs_rot, rot_reciprocal->mat[i], adrs); gp_rot = grg_get_grid_index(adrs_rot, D_diag); if (gp_rot == grid_point) { ir_rot[num_rot] = i; num_rot++; } } if ((rot_reciprocal_q = bzg_alloc_RotMats(num_rot)) != NULL) { for (i = 0; i < num_rot; i++) { lagmat_copy_matrix_l3(rot_reciprocal_q->mat[i], rot_reciprocal->mat[ir_rot[i]]); } } free(ir_rot); ir_rot = NULL; return rot_reciprocal_q; }
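/* A standalone sketch of the triplet relation used above: given q and q', the
 * third point is q'' = -q - q' (momentum conservation modulo a reciprocal
 * lattice vector). grg_get_grid_index() is defined elsewhere; wrap_grid_index()
 * below is a hypothetical stand-in that assumes the conventional periodic wrap
 * onto a D_diag[0] x D_diag[1] x D_diag[2] grid with x running fastest. */
#include <stdio.h>

static long wrap_grid_index(const long adrs[3], const long D_diag[3])
{
  long i, a[3];
  for (i = 0; i < 3; i++) {
    a[i] = adrs[i] % D_diag[i];
    if (a[i] < 0) { a[i] += D_diag[i]; } /* C's % keeps the sign of adrs[i] */
  }
  return a[0] + a[1] * D_diag[0] + a[2] * D_diag[0] * D_diag[1];
}

int main(void)
{
  const long D_diag[3] = {4, 4, 4};
  const long q0[3] = {1, 2, 3}, q1[3] = {3, 3, 2};
  long q2[3], i;

  for (i = 0; i < 3; i++) { q2[i] = - q0[i] - q1[i]; } /* q'' = -q - q' */
  printf("q'' = (%ld, %ld, %ld) wraps to grid index %ld\n",
         q2[0], q2[1], q2[2], wrap_grid_index(q2, D_diag));
  return 0;
}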
GB_unaryop__abs_bool_int8.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__abs_bool_int8 // op(A') function: GB_tran__abs_bool_int8 // C type: bool // A type: int8_t // cast: bool cij = (bool) aij // unaryop: cij = aij #define GB_ATYPE \ int8_t #define GB_CTYPE \ bool // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int8_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CASTING(z, x) \ bool z = (bool) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ABS || GxB_NO_BOOL || GxB_NO_INT8) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__abs_bool_int8 ( bool *restrict Cx, const int8_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__abs_bool_int8 ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
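// A standalone sketch (not part of SuiteSparse:GraphBLAS) of what the macro
// pipeline above expands to for this kernel: each entry is read as int8_t
// (GB_GETA), typecast to bool (GB_CASTING), and the ABS op is applied, which
// is the identity on bool (GB_OP). OpenMP splits the flat loop statically
// across nthreads; iterations are independent, so the loop is race-free.
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static void unop_abs_bool_int8_sketch(bool *Cx, const int8_t *Ax,
                                      int64_t anz, int nthreads)
{
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        int8_t aij = Ax [p] ;       // GB_GETA
        bool z = (bool) aij ;       // GB_CASTING: nonzero -> true
        Cx [p] = z ;                // GB_OP: abs(x) == x for bool
    }
}

int main(void)
{
    int8_t A [4] = {-3, 0, 1, 127} ;
    bool C [4] ;
    unop_abs_bool_int8_sketch(C, A, 4, 2) ;
    for (int p = 0 ; p < 4 ; p++) printf("%d ", (int) C [p]) ;
    printf("\n") ;                  // prints: 1 0 1 1
    return (0) ;
}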
critical_flet.h
/*++ Copyright (c) 2011 Microsoft Corporation Module Name: critical_flet.h Abstract: Version of flet using the "omp critical" directive. Warning: it uses the omp critical section "critical_flet". Author: Leonardo de Moura (leonardo) 2011-05-12 Revision History: --*/ #ifndef CRITICAL_FLET_H_ #define CRITICAL_FLET_H_ template<typename T> class critical_flet { T & m_ref; T m_old_value; public: critical_flet(T & ref, const T & new_value): m_ref(ref), m_old_value(ref) { #pragma omp critical (critical_flet) { m_ref = new_value; } } ~critical_flet() { #pragma omp critical (critical_flet) { m_ref = m_old_value; } } }; #endif
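// Usage sketch (hypothetical, not from the Z3 sources): critical_flet is an
// RAII "fluid let". It sets a shared variable to a new value for the lifetime
// of a scope and restores the old value on exit, with both writes serialized
// through the named critical section "critical_flet".
static int g_verbosity = 0; // illustrative shared state

static void noisy_region() {
    critical_flet<int> raise_verbosity(g_verbosity, 10);
    // ... work that consults g_verbosity, which is 10 inside this scope ...
} // destructor runs here and writes the old value back under the same section

int main() {
    noisy_region();
    return g_verbosity; // 0: the previous value was restored on scope exit
}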
templatemath.h
/* * ****************************************************************************** * * * * * * This program and the accompanying materials are made available under the * * terms of the Apache License, Version 2.0 which is available at * * https://www.apache.org/licenses/LICENSE-2.0. * * * * See the NOTICE file distributed with this work for additional * * information regarding copyright ownership. * * Unless required by applicable law or agreed to in writing, software * * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * * License for the specific language governing permissions and limitations * * under the License. * * * * SPDX-License-Identifier: Apache-2.0 * ***************************************************************************** */ /* * templatemath.h * * Created on: Jan 1, 2016 * Author: agibsonccc */ #ifndef TEMPLATEMATH_H_ #define TEMPLATEMATH_H_ #include <system/dll.h> #include <system/pointercast.h> #include <math/platformmath.h> #include <array/DataTypeUtils.h> #define BFLOAT16_MAX_VALUE 32737. #define HALF_MAX_VALUE 65504. #define FLOAT_MAX_VALUE 3.4028235E38 #define DOUBLE_MAX_VALUE 1.7976931348623157E308 #define FLOAT_MIN_NORMAL 1.17549435e-38 #ifndef M_E #define M_E 2.718281828459 #endif #ifndef M_PI #define M_PI 3.14159265358979323846 #endif namespace sd { #ifdef __CUDACC__ #endif namespace math { template<typename T> math_def inline T nd4j_abs(T value); template<typename T> math_def inline void nd4j_swap(T &val1, T &val2); template<typename T> math_def inline T nd4j_max(T val1, T val2); template<typename T> math_def inline T nd4j_min(T val1, T val2); template <typename T> math_def inline bool nd4j_eq(T val1, T val2, double eps); template<typename T, typename Z> math_def inline Z nd4j_re(T val1, T val2); template<typename T, typename Z> math_def inline Z nd4j_rint(T val1); template<typename T, typename Z> math_def inline Z nd4j_copysign(T val1, T val2); template <typename T, typename Z> math_def inline Z nd4j_softplus(T val); template <typename T> math_def inline T nd4j_rotl(T val, T shift); template <typename T> math_def inline T nd4j_rotr(T val, T shift); //#ifndef __CUDACC__ template<typename X, typename Y, typename Z> math_def inline Z nd4j_dot(X *x, Y *y, int length); //#endif template<typename T, typename Z> math_def inline Z nd4j_ceil(T val1); template<typename T> math_def inline bool nd4j_isnan(T val1); template<typename T> math_def inline bool nd4j_isinf(T val1); template<typename T> math_def inline bool nd4j_isfin(T val1); template<typename T, typename Z> math_def inline Z nd4j_cos(T val); template<typename T, typename Z> math_def inline Z nd4j_cosh(T val); template<typename X, typename Z> math_def inline Z nd4j_exp(X val); template<typename T, typename Z> math_def inline Z nd4j_floor(T val); template<typename X, typename Z> math_def inline Z nd4j_log(X val); template<typename X, typename Y, typename Z> math_def inline Z nd4j_pow(X val, Y val2); template<typename T, typename Z> math_def inline Z nd4j_round(T val); template<typename X, typename Y, typename Z> math_def inline Z nd4j_remainder(X num, Y denom); template<typename X, typename Y, typename Z> math_def inline Z nd4j_fmod(X num, Y denom); template<typename T, typename Z> math_def inline Z nd4j_erf(T num); template<typename T, typename Z> math_def inline Z nd4j_erfc(T num); math_def inline int32_t floatToRawIntBits(float d) { union { float f; int32_t i; } tmp; tmp.f = d; return tmp.i; } math_def inline 
float intBitsToFloat(int32_t i) { union { float f; int32_t i; } tmp; tmp.i = i; return tmp.f; } math_def inline float mulsignf(float x, float y) { return intBitsToFloat(floatToRawIntBits(x) ^ (floatToRawIntBits(y) & (1 << 31))); } math_def inline float copysignfk(float x, float y) { return intBitsToFloat((floatToRawIntBits(x) & ~(1 << 31)) ^ (floatToRawIntBits(y) & (1 << 31))); } template<typename T, typename Z> math_def inline Z nd4j_sigmoid(T val) { return (Z) 1.0f / ((Z) 1.0f + nd4j_exp<T, Z>(-val)); } template<typename T, typename Z> math_def inline Z nd4j_elu(T val, T alpha) { if (val >= (T) 0.f) return val; return static_cast<Z>(alpha) * (nd4j_exp<T, Z>(val) - static_cast<Z>(1.0f)); } template<typename T, typename Z> math_def inline Z nd4j_leakyrelu(T val,T alpha) { if (val < (T) 0.0f) return alpha * val; else return val; } template<typename T, typename Z> math_def inline Z nd4j_eluderivative(T val, T alpha) { if (val >= static_cast<T>(0.0f)) return static_cast<Z>(1.0f); return static_cast<Z>(alpha) * nd4j_exp<T, Z>(val); //return val >= 0.0 ? 1.0 : nd4j_exp(val); } template<typename T, typename Z> math_def inline Z nd4j_sin(T val); template<typename T, typename Z> math_def inline Z nd4j_sinh(T val); template<typename T, typename Z> math_def inline Z nd4j_softplus(T val) { return nd4j_log<T, Z>((Z) 1.0f + nd4j_exp<T, Z>(val)); } template<typename T, typename Z> math_def inline Z nd4j_softsign(T val) { return val / ((T) 1.0f + sd::math::nd4j_abs<T>(val)); } template<typename X, typename Z> math_def inline Z nd4j_sqrt(X val); template<typename X, typename Z> math_def inline Z nd4j_tanh(X val); template<typename T, typename Z> math_def inline Z nd4j_tan(T val); template<typename X, typename Z> math_def inline Z nd4j_atan2(X val1, X val2); template<typename X, typename Z> math_def inline Z nd4j_atan2(X val1, X val2) { return p_atan2<Z>(static_cast<Z>(val1), static_cast<Z>(val2)); } template<typename T, typename Z> math_def inline Z nd4j_tan(T tval) { return p_tan<Z>(static_cast<Z>(tval)); } template<typename T, typename Z> math_def inline Z nd4j_tanhderivative(T val) { Z tanh = nd4j_tanh<T,Z>(val); return (Z) 1.0f - tanh * tanh; } template <typename T, typename Z> math_def inline T nd4j_sigmoidderivative(T val) { Z sigmoid = nd4j_sigmoid<T,Z>(val); return sigmoid * ((Z) 1.0f - sigmoid); } template<typename T, typename Z> math_def inline T nd4j_softsignderivative(T val) { T y = (T) 1.0f + nd4j_abs(val); return (Z) 1.0f / (y * y); } template<typename T, typename Z> math_def inline T nd4j_sgn(T val) { return val < (T) 0.0f ? (Z) -1.0f : val > (T) 0.0f ? (Z) 1.0f : (Z) 0.0f; } template<typename T, typename Z> math_def inline Z nd4j_sign(T val) { return nd4j_sgn<T, Z>(val); } template<typename T, typename Z> math_def inline Z nd4j_signum(T val) { return nd4j_sgn<T, Z>(val); } template<typename X, typename Z> math_def inline Z nd4j_gamma(X a); template<typename X, typename Z> math_def inline Z nd4j_lgamma(X x); //#ifndef __CUDACC__ /* template<> math_def inline float16 nd4j_dot<float16>(float16 *x, float16 *y, int length) { float16 dot = (float16) 0.0f; // TODO: since we can't use simd on unions, we might use something else here. 
for(int e = 0; e < length; e++) { dot += x[e] * y[e]; } return dot; } */ template<typename X, typename Y, typename Z> math_def inline Z nd4j_dot(X *x, Y *y, int length) { Z dot = (Z)0.0f; for(int e = 0; e < length; e++) { dot += static_cast<Z>(x[e]) * static_cast<Z>(y[e]); } return dot; } //#endif template<typename T, typename Z> math_def inline Z nd4j_acos(T val); template<typename T, typename Z> math_def inline Z nd4j_sech(T val); template<typename T, typename Z> math_def inline Z nd4j_acosh(T val); template<typename T, typename Z> math_def inline Z nd4j_asin(T val); template<typename T, typename Z> math_def inline Z nd4j_asinh(T val); template<typename T, typename Z> math_def inline Z nd4j_asinh(T val) { //Math.log(Math.sqrt(Math.pow(x, 2) + 1) + x) return nd4j_log<Z, Z>(nd4j_sqrt<Z, Z>(nd4j_pow<T,T,Z>(val, (T) 2) + (Z) 1.f) + (Z) val); } template<typename T, typename Z> math_def inline Z nd4j_atan(T val); template<typename T, typename Z> math_def inline Z nd4j_atanh(T val); template<> math_def inline float16 nd4j_abs<float16>(float16 value) { #ifdef NATIVE_HALFS if (value < (float16) 0.f) { return float16(__hneg(value.data)); } else return value; #else return (float16) fabsf((float) value); #endif } template<> math_def inline bfloat16 nd4j_abs<bfloat16>(bfloat16 value) { return (bfloat16) fabsf((float) value); } template<> math_def inline float nd4j_abs<float>(float value) { return fabsf(value); } template<> math_def inline double nd4j_abs<double>(double value) { return fabs(value); } template<> math_def inline int nd4j_abs<int>(int value) { return abs(value); } template<> math_def inline Nd4jLong nd4j_abs<Nd4jLong>(Nd4jLong value) { return llabs(value); } template<> math_def inline bool nd4j_abs<bool>(bool value) { return value; } template<> math_def inline uint8_t nd4j_abs<uint8_t>(uint8_t value) { return value; } template<> math_def inline uint16_t nd4j_abs<uint16_t>(uint16_t value) { return value; } template<> math_def inline uint32_t nd4j_abs<uint32_t>(uint32_t value) { return value; } template<> math_def inline Nd4jULong nd4j_abs<Nd4jULong>(Nd4jULong value) { return value; } template<> math_def inline int8_t nd4j_abs<int8_t>(int8_t value) { return value < 0 ? -value : value; } template<> math_def inline int16_t nd4j_abs<int16_t>(int16_t value) { return value < 0 ? 
-value : value; } template<> math_def inline bool nd4j_isnan<float16>(float16 value) { return *(value.data.getXP()) == 0x7fffU; } template<> math_def inline bool nd4j_isnan<bfloat16>(bfloat16 value) { return value == bfloat16::nan(); //0x7fffU; } template<> math_def inline bool nd4j_isnan<float>(float value) { return value != value; } template<> math_def inline bool nd4j_isnan<double>(double value) { return value != value; } template<> math_def inline bool nd4j_isnan<int>(int value) { return false; } template<> math_def inline bool nd4j_isnan<uint32_t>(uint32_t value) { return false; } template<> math_def inline bool nd4j_isnan<uint16_t>(uint16_t value) { return false; } template<> math_def inline bool nd4j_isnan<uint8_t>(uint8_t value) { return false; } template<> math_def inline bool nd4j_isnan<int16_t>(int16_t value) { return false; } template<> math_def inline bool nd4j_isnan<int8_t>(int8_t value) { return false; } template<> math_def inline bool nd4j_isnan<bool>(bool value) { return false; } template<> math_def inline bool nd4j_isnan<Nd4jLong>(Nd4jLong value) { return false; } template<> math_def inline bool nd4j_isnan<Nd4jULong>(Nd4jULong value) { return false; } template<> math_def inline bool nd4j_isinf<float16>(float16 value) { return value < (float16) -HALF_MAX_VALUE || value > (float16) HALF_MAX_VALUE; } template<> math_def inline bool nd4j_isinf<bfloat16>(bfloat16 value) { return value < (bfloat16) -BFLOAT16_MAX_VALUE || value > (bfloat16) BFLOAT16_MAX_VALUE; } template<> math_def inline bool nd4j_isinf<float>(float value) { #ifdef __CUDACC__ return isinf(value); #else return std::isinf(value); #endif //return value < -FLOAT_MAX_VALUE || value > FLOAT_MAX_VALUE; } template<> math_def inline bool nd4j_isinf<double>(double value) { #ifdef __CUDACC__ return isinf(value); #else return std::isinf(value); #endif //return value < -DOUBLE_MAX_VALUE || value > DOUBLE_MAX_VALUE; } template<> math_def inline bool nd4j_isinf<int>(int value) { return false; } template<> math_def inline bool nd4j_isinf<uint32_t>(uint32_t value) { return false; } template<> math_def inline bool nd4j_isinf<uint16_t>(uint16_t value) { return false; } template<> math_def inline bool nd4j_isinf<uint8_t>(uint8_t value) { return false; } template<> math_def inline bool nd4j_isinf<int16_t>(int16_t value) { return false; } template<> math_def inline bool nd4j_isinf<int8_t>(int8_t value) { return false; } template<> math_def inline bool nd4j_isinf<bool>(bool value) { return false; } template<> math_def inline bool nd4j_isinf<Nd4jLong>(Nd4jLong value) { return false; } template<> math_def inline bool nd4j_isinf<Nd4jULong>(Nd4jULong value) { return false; } template<typename T> math_def inline bool nd4j_isfin(T value) { return !nd4j_isnan<T>(value) && !nd4j_isinf<T>(value); } template<> math_def inline float16 nd4j_copysign<float16>(float16 val1, float16 val2) { return (float16) copysignf((float) val1, (float) val2); } template<> math_def inline float nd4j_copysign<float>(float val1, float val2) { return copysignf(val1, val2); } template<> math_def inline double nd4j_copysign<double>(double val1, double val2) { return copysign(val1, val2); } template<> math_def inline int nd4j_copysign<int>(int val1, int val2) { if (val2 < 0) return -(nd4j_abs<int>(val1)); else return nd4j_abs<int>(val1); } template<> math_def inline Nd4jLong nd4j_copysign<Nd4jLong>(Nd4jLong val1, Nd4jLong val2) { if (val2 < 0) return -(nd4j_abs<Nd4jLong>(val1)); else return nd4j_abs<Nd4jLong>(val1); } template<> math_def inline bool nd4j_max(bool val1, 
bool val2) { return val1 || val2; } template<typename T> math_def inline T nd4j_max(T val1, T val2) { return val1 > val2 ? val1 : val2; } template<> math_def inline bool nd4j_min(bool val1, bool val2) { return val1 && val2; } template<typename T> math_def inline T nd4j_min(T val1, T val2) { return val1 < val2 ? val1 : val2; } template <typename T> math_def inline bool nd4j_eq(T d1, T d2, double eps) { if (sd::math::nd4j_isinf<T>(d1) && sd::math::nd4j_isinf<T>(d2)) { if (d1 > 0 && d2 > 0) return true; else if (d1 < 0 && d2 < 0) return true; else return false; } auto diff = static_cast<double>(sd::math::nd4j_abs<T>(d1 - d2)); // works well except in the range of very large numbers if (diff <= eps) return true; // Knuth approach // works well except in the range of very small numbers if (diff <= sd::math::nd4j_max<double>(sd::math::nd4j_abs<double>(static_cast<double>(d1)), sd::math::nd4j_abs<double>(static_cast<double>(d2))) * eps) return true; return false; } template <typename X, typename Z> math_def inline Z nd4j_ceil(X val) { return static_cast<Z>(p_ceil<X>(val)); } template <typename X, typename Z> math_def inline Z nd4j_round(X val) { return static_cast<Z>(p_round<X>(val)); } template <typename X, typename Z> math_def inline Z nd4j_asin(X val) { return p_asin<Z>(static_cast<Z>(val)); } template <typename X, typename Z> math_def inline Z nd4j_atan(X val) { return p_atan<Z>(static_cast<Z>(val)); } template <typename X, typename Z> math_def inline Z nd4j_atanh(X val) { return p_atanh<Z>(static_cast<Z>(val)); } template <typename X, typename Z> math_def inline Z nd4j_cosh(X val) { return p_cosh<Z>(static_cast<Z>(val)); } template <typename X, typename Z> math_def inline Z nd4j_rint(X val) { return p_rint<X>(val); } template <typename X, typename Z> math_def inline Z nd4j_sinh(X val) { return p_sinh<Z>(static_cast<Z>(val)); } template <typename X, typename Z> math_def inline Z nd4j_acos(X val) { return p_acos<Z>(static_cast<Z>(val)); } template <typename X, typename Z> math_def inline Z nd4j_sech(X val) { return static_cast<Z>(1) / nd4j_cosh<X,Z>(val); } template <typename X, typename Z> math_def inline Z nd4j_acosh(X val) { return p_acosh<Z>(static_cast<Z>(val)); } template <typename X, typename Z> math_def inline Z nd4j_cos(X val) { return p_cos<Z>(static_cast<Z>(val)); } template <typename X, typename Z> math_def inline Z nd4j_exp(X val) { return p_exp<X>(val); } template<typename X, typename Z> math_def inline Z nd4j_floor(X val) { return static_cast<Z>(p_floor<X>(val)); } template<typename X, typename Z> math_def inline Z nd4j_log(X val) { return static_cast<Z>(p_log<X>(val)); } /** * This function is a special case: it must return a floating-point value, and the Y argument can optionally be a floating-point argument * @tparam X * @tparam Y * @tparam Z * @param val * @param val2 * @return */ template <> math_def inline float nd4j_pow(float val, float val2) { return p_pow<float>(val, val2); } template <typename X, typename Y, typename Z> math_def inline Z nd4j_pow(X val, Y val2) { return p_pow<Z>(static_cast<Z>(val), static_cast<Z>(val2)); } /** * LogGamma(a) - floating-point extension of ln(n!) **/ template <typename X, typename Z> math_def inline Z nd4j_lgamma(X x) { // if (x <= X(0.0)) // { // std::stringstream os; // os << "Logarithm of Gamma makes sense only for positive values, but " << x << " was given."; // throw std::invalid_argument( os.str() ); // } if (x < X(12.0)) { return nd4j_log<Z,Z>(nd4j_gamma<X,Z>(x)); } // Abramowitz and Stegun 6.1.41 // Asymptotic series should be good to at least 11 or 12 figures // For error analysis, see Whittaker and Watson // A Course in Modern Analysis (1927), page 252 static const double c[8] = { 1.0/12.0, -1.0/360.0, 1.0/1260.0, -1.0/1680.0, 1.0/1188.0, -691.0/360360.0, 1.0/156.0, -3617.0/122400.0 }; double z = Z(1.0 / Z(x * x)); double sum = c[7]; for (int i = 6; i >= 0; i--) { sum *= z; sum += c[i]; } double series = sum / Z(x); static const double halfLogTwoPi = 0.91893853320467274178032973640562; return Z((double(x) - 0.5) * nd4j_log<X,double>(x) - double(x) + halfLogTwoPi + series); } template<typename T> math_def inline T nd4j_re(T val1, T val2) { if (val1 == (T) 0.0f && val2 == (T) 0.0f) return (T) 0.0f; return nd4j_abs<T>(val1 - val2) / (nd4j_abs<T>(val1) + nd4j_abs<T>(val2)); } template <typename X, typename Y, typename Z> math_def inline Z nd4j_remainder(X val, Y val2) { return p_remainder<Z>(static_cast<Z>(val), static_cast<Z>(val2)); } template <typename X, typename Y, typename Z> math_def inline Z nd4j_fmod(X val, Y val2) { return p_fmod<Z>(static_cast<Z>(val), static_cast<Z>(val2)); } template <typename X, typename Z> math_def inline Z nd4j_sin(X val) { return p_sin<Z>(static_cast<Z>(val)); } template <typename X, typename Z> math_def inline Z nd4j_sqrt(X val) { return p_sqrt<Z>(static_cast<Z>(val)); } template <typename X> math_def inline X neg_tanh(X val) { X o = static_cast<X>(1.0f); X t = static_cast<X>(2.0f); X e = static_cast<X>(M_E); auto p = sd::math::nd4j_pow<X, X, X>(e, val * t); return (p - o) / (p + o); } template <typename X> math_def inline X pos_tanh(X val) { X o = static_cast<X>(1.0f); X t = static_cast<X>(-2.0f); X e = static_cast<X>(M_E); auto p = sd::math::nd4j_pow<X, X, X>(e, val * t); return (o - p) / (o + p); } math_def inline float neu_tanh(float val, float sign) { float e(M_E); float av = sign * val; auto p = sd::math::nd4j_pow<float, float, float>(e, -av * 2.f); return (1 - p) / (1 + p); } template <> math_def inline float nd4j_tanh(float val) { float sign = copysignfk(1.0f, val); return sign * neu_tanh(val, sign); } template <typename X, typename Z> math_def inline Z nd4j_tanh(X val) { return val <= 0 ? neg_tanh(val) : pos_tanh(val); } template <typename T> math_def inline T nd4j_rotl(T val, T shift) { return p_rotl<T>(val, shift); } template <typename T> math_def inline T nd4j_rotr(T val, T shift) { return p_rotr<T>(val, shift); } template <typename X, typename Z> math_def inline Z nd4j_erf(X val) { return p_erf<Z>(static_cast<Z>(val)); } template <typename X, typename Z> math_def inline Z nd4j_erfc(X val) { return p_erfc<Z>(static_cast<Z>(val)); } template<typename T> math_def inline void nd4j_swap(T &val1, T &val2) { T temp = val1; val1 = val2; val2 = temp; } template <typename X, typename Z> math_def inline Z nd4j_gamma(X a) { // nd4j_lgamma<X,Z>(a); // return (Z)std::tgamma(a); // Split the function domain into three intervals: // (0, 0.001), [0.001, 12), and (12, infinity) /////////////////////////////////////////////////////////////////////////// // First interval: (0, 0.001) // // For small a, 1/Gamma(a) has power series a + gamma a^2 - ... // So in this range, 1/Gamma(a) = a + gamma a^2 with error on the order of a^3. // The relative error over this interval is less than 6e-7. const double eulerGamma = 0.577215664901532860606512090; // Euler's gamma constant if (a < X(0.001)) return Z(1.0 / ((double)a * (1.0 + eulerGamma * (double)a))); /////////////////////////////////////////////////////////////////////////// // Second interval: [0.001, 12) if (a < X(12.0)) { // The algorithm directly approximates gamma over (1,2) and uses // reduction identities to reduce other arguments to this interval. double y = (double)a; int n = 0; bool argWasLessThanOne = y < 1.0; // Add or subtract integers as necessary to bring y into (1,2) // Will correct for this below if (argWasLessThanOne) { y += 1.0; } else { n = static_cast<int>(floor(y)) - 1; // will use n later y -= n; } // numerator coefficients for approximation over the interval (1,2) static const double p[] = { -1.71618513886549492533811E+0, 2.47656508055759199108314E+1, -3.79804256470945635097577E+2, 6.29331155312818442661052E+2, 8.66966202790413211295064E+2, -3.14512729688483675254357E+4, -3.61444134186911729807069E+4, 6.64561438202405440627855E+4 }; // denominator coefficients for approximation over the interval (1,2) static const double q[] = { -3.08402300119738975254353E+1, 3.15350626979604161529144E+2, -1.01515636749021914166146E+3, -3.10777167157231109440444E+3, 2.25381184209801510330112E+4, 4.75584627752788110767815E+3, -1.34659959864969306392456E+5, -1.15132259675553483497211E+5 }; double num = 0.0; double den = 1.0; double z = y - 1; for (auto i = 0; i < 8; i++) { num = (num + p[i]) * z; den = den * z + q[i]; } double result = num / den + 1.0; // Apply correction if argument was not initially in (1,2) if (argWasLessThanOne) { // Use identity gamma(z) = gamma(z+1)/z // The variable "result" now holds gamma of the original y + 1 // Thus we use y-1 to get back the original y. result /= (y - 1.0); } else { // Use the identity gamma(z+n) = z*(z+1)* ... *(z+n-1)*gamma(z) for (auto i = 0; i < n; i++) result *= y++; } return Z(result); } /////////////////////////////////////////////////////////////////////////// // Third interval: [12, infinity) if (a > 171.624) { // Correct answer too large to display. Force +infinity. return Z(DOUBLE_MAX_VALUE); // return DataTypeUtils::infOrMax<Z>(); } return sd::math::nd4j_exp<Z,Z>(sd::math::nd4j_lgamma<X,Z>(a)); } template <typename X, typename Y, typename Z> math_def inline Z nd4j_igamma(X a, Y x) { Z aim = nd4j_pow<X, X, Z>(x, a) / (nd4j_exp<X, Z>(x) * nd4j_gamma<Y, Z>(a)); auto sum = Z(0.); auto denom = Z(1.); if (a <= X(0.000001)) //throw std::runtime_error("Cannot calculate gamma for a zero val."); return Z(0); for (int i = 0; Z(1./denom) > Z(1.0e-12); i++) { denom *= (a + i); sum += nd4j_pow<X, int, Z>(x, i) / denom; } return aim * sum; } template <typename X, typename Y, typename Z> math_def inline Z nd4j_igammac(X a, Y x) { return Z(1.)
- nd4j_igamma<X, Y, Z>(a, x); } #ifdef __CUDACC__ namespace atomics { template <typename T> inline __device__ T nd4j_atomicAdd(T* address, T val); template <typename T> inline __device__ T nd4j_atomicSub(T* address, T val); template <typename T> inline __device__ T nd4j_atomicMul(T* address, T val); template <typename T> inline __device__ T nd4j_atomicDiv(T* address, T val); template <typename T> inline __device__ T nd4j_atomicMin(T* address, T val); template <typename T> inline __device__ T nd4j_atomicMax(T* address, T val); template <> inline __device__ int32_t nd4j_atomicMin<int32_t>(int32_t* address, int32_t val) { return atomicMin(address, val); } template <> inline __device__ uint32_t nd4j_atomicMin<uint32_t>(uint32_t* address, uint32_t val) { return atomicMin(address, val); } template <> inline __device__ float nd4j_atomicMin<float>(float* address, float val) { int* address_as_ull = (int*)address; int old = __float_as_int(val), assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __float_as_int(math::nd4j_min(val, __int_as_float(assumed)))); } while (assumed != old); return __int_as_float(old); } template <> inline __device__ double nd4j_atomicMin<double>(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = __double_as_longlong(val), assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(math::nd4j_min(val, __longlong_as_double(assumed)))); } while (assumed != old); return __longlong_as_double(old); } template <> inline __device__ uint64_t nd4j_atomicMin<uint64_t>(uint64_t* address, uint64_t val) { #if __CUDA_ARCH__ >= 350 return atomicMin((unsigned long long*)address, (unsigned long long)val); #else unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = __double_as_longlong(val), assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, math::nd4j_min((unsigned long long)val, assumed)); } while (assumed != old); return old; #endif } template <> inline __device__ Nd4jLong nd4j_atomicMin<Nd4jLong>(Nd4jLong* address, Nd4jLong val) { #if __CUDA_ARCH__ >= 350 return atomicMin((unsigned long long*)address, (unsigned long long)val); #else unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = (unsigned long long)val, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, math::nd4j_min(val, (Nd4jLong)assumed)); } while (assumed != old); return old; #endif } template <> inline __device__ int16_t nd4j_atomicMin<int16_t>(int16_t* address, int16_t val) { int32_t temp = *address; *address = atomicMin(&temp, (int)val); return *address; } template <> inline __device__ bfloat16 nd4j_atomicMin<bfloat16>(bfloat16* address, bfloat16 val) { return bfloat16(nd4j_atomicMin<int16_t>(&address->_data, val._data)); } template <> inline __device__ float16 nd4j_atomicMin<float16>(float16* address, float16 val) { return float16(nd4j_atomicMin<int16_t>(reinterpret_cast<int16_t*>(&address->data), (int16_t)val.data)); } template <> inline __device__ int32_t nd4j_atomicMax<int32_t>(int32_t* address, int32_t val) { return atomicMax(address, val); } template <> inline __device__ uint32_t nd4j_atomicMax<uint32_t>(uint32_t* address, uint32_t val) { return atomicMax(address, val); } template <> inline __device__ double nd4j_atomicMax<double>(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long 
long int old = __double_as_longlong(val), assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(math::nd4j_max(val, __longlong_as_double(assumed)))); } while (assumed != old); return __longlong_as_double(old); } template <> inline __device__ float nd4j_atomicMax<float>(float* address, float val) { int* address_as_ull = (int*)address; int old = __float_as_int(val), assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __float_as_int(math::nd4j_max(val, __int_as_float(assumed)))); } while (assumed != old); return __int_as_float(old); } template <> inline __device__ uint8_t nd4j_atomicMin<uint8_t>(uint8_t* address, uint8_t val) { uint32_t temp = *address; *address = atomicMin(&temp, (uint32_t)val); return *address; } template <> inline __device__ int8_t nd4j_atomicMin<int8_t>(int8_t* address, int8_t val) { int32_t temp = *address; *address = atomicMin(&temp, (int)val); return *address; } template <> inline __device__ uint16_t nd4j_atomicMin<uint16_t>(uint16_t* address, uint16_t val) { uint32_t temp = *address; *address = atomicMin(&temp, (uint32_t)val); return *address; } template <> inline __device__ uint8_t nd4j_atomicMax<uint8_t>(uint8_t* address, uint8_t val) { uint32_t temp = *address; *address = atomicMax(&temp, (uint32_t)val); return *address; } template <> inline __device__ int8_t nd4j_atomicMax<int8_t>(int8_t* address, int8_t val) { int32_t temp = *address; *address = atomicMax(&temp, (int)val); return *address; } template <> inline __device__ uint16_t nd4j_atomicMax<uint16_t>(uint16_t* address, uint16_t val) { uint32_t temp = *address; *address = atomicMax(&temp, (uint32_t)val); return *address; } template <> inline __device__ int16_t nd4j_atomicMax<int16_t>(int16_t* address, int16_t val) { int32_t temp = *address; *address = atomicMax(&temp, (int32_t)val); return *address; } template <> inline __device__ float16 nd4j_atomicMax<float16>(float16* address, float16 val) { auto address_as_ull = (int*) address; long addr = (long) address; bool misaligned = addr & 0x3; if (misaligned) address_as_ull = (int *) (address - 1); PAIR old, assumed, fresh; old.W = *address_as_ull; do { if (!misaligned) { float16 res = nd4j_max((float16) old.B.H, val); fresh.B.H = res.data; fresh.B.L = old.B.L; } else { float16 res = nd4j_max((float16) old.B.L, val); fresh.B.L = res.data; fresh.B.H = old.B.H; } assumed.W = old.W; old.W = atomicCAS(address_as_ull, assumed.W, fresh.W); } while (assumed.W != old.W); if (!misaligned) return old.B.H; else return old.B.L; } template <> inline __device__ bfloat16 nd4j_atomicMax<bfloat16>(bfloat16* address, bfloat16 val) { auto address_as_ull = (int*) address; long addr = (long)(address); bool misaligned = addr & 0x3; if (misaligned) address_as_ull = (int *) (address - 1); BPAIR old, assumed, fresh; old.W = *address_as_ull; do { if (!misaligned) { bfloat16 res = nd4j_max(old.B.H, val); fresh.B.H = res; fresh.B.L = old.B.L; } else { bfloat16 res = nd4j_max(old.B.L, val); fresh.B.L = res; fresh.B.H = old.B.H; } assumed.W = old.W; old.W = atomicCAS(address_as_ull, assumed.W, fresh.W); } while (assumed.W != old.W); if (!misaligned) return old.B.H; else return old.B.L; } template <> inline __device__ uint64_t nd4j_atomicMax<uint64_t>(uint64_t* address, uint64_t val) { #if __CUDA_ARCH__ >= 350 return atomicMax((unsigned long long*)address, (unsigned long long)val); #else unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = __double_as_longlong(val), assumed; do { assumed = 
old; old = atomicCAS(address_as_ull, assumed, math::nd4j_max((unsigned long long)val, assumed)); } while (assumed != old); return old; #endif } template <> inline __device__ Nd4jLong nd4j_atomicMax<Nd4jLong>(Nd4jLong* address, Nd4jLong val) { unsigned long long int* address_as_ull = (unsigned long long int *) address; //return (Nd4jLong) atomicAdd(address_as_ull, (unsigned long long int) val); unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, (unsigned long long)nd4j_max(val, (Nd4jLong)assumed)); } while (assumed != old); return old; } template <> inline __device__ double nd4j_atomicAdd<double>(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int *) address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed,__double_as_longlong(val + __longlong_as_double(assumed))); } while (assumed != old); return __longlong_as_double(old); } template <> inline __device__ Nd4jLong nd4j_atomicAdd<Nd4jLong>(Nd4jLong* address, Nd4jLong val) { unsigned long long int* address_as_ull = (unsigned long long int *) address; //return (Nd4jLong) atomicAdd(address_as_ull, (unsigned long long int) val); unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, val + assumed); } while (assumed != old); return old; } template <> inline __device__ long nd4j_atomicAdd<long>(long* address, long val) { unsigned long long* address_as_ull = (unsigned long long int *) address; // return atomicAdd(address, val); unsigned long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, val + assumed); } while (assumed != old); return old; } template <> inline __device__ uint32_t nd4j_atomicAdd<uint32_t>(uint32_t* address, uint32_t val) { return atomicAdd(address, val); } template <> inline __device__ uint64_t nd4j_atomicAdd<uint64_t>(uint64_t* address, uint64_t val) { // unsigned long long* address_as_ull = (unsigned long long int *) address; // //// return atomicAdd(address, val); // unsigned long int old = *address_as_ull, assumed; // do { // assumed = old; // old = atomicCAS(address_as_ull, assumed, val + assumed); // } while (assumed != old); // return old; return (uint64_t)atomicAdd((unsigned long long*)address, (unsigned long long)val); } template <> inline __device__ float16 nd4j_atomicAdd<float16>(float16* address, float16 val) { #if __CUDA_ARCH__ >= 700 && CUDA_VERSION_MAJOR >=10 atomicAdd(reinterpret_cast<__half*>(address), val.data); #else auto address_as_ull = (int*) address; long addr = (long) address; bool misaligned = addr & 0x3; if (misaligned) address_as_ull = (int *) (address - 1); PAIR old, assumed, fresh; old.W = *address_as_ull; do { if (!misaligned) { float16 res = ((float16) old.B.H) + val; fresh.B.H = res.data; fresh.B.L = old.B.L; } else { float16 res = ((float16) old.B.L) + val; fresh.B.L = res.data; fresh.B.H = old.B.H; } assumed.W = old.W; old.W = atomicCAS(address_as_ull, assumed.W, fresh.W); } while (assumed.W != old.W); if (!misaligned) return old.B.H; else return old.B.L; #endif } template <> inline __device__ bfloat16 nd4j_atomicAdd<bfloat16>(bfloat16* address, bfloat16 val) { auto address_as_ull = (int*) address; auto addr = (long)(address); bool misaligned = addr & 0x3; if (misaligned) address_as_ull = (int *) (address - 1); BPAIR old, assumed, fresh; old.W = *address_as_ull; do { if (!misaligned) { bfloat16 res = old.B.H + val; 
fresh.B.H = res; fresh.B.L = old.B.L; } else { bfloat16 res = old.B.L + val; fresh.B.L = res; fresh.B.H = old.B.H; } assumed.W = old.W; old.W = atomicCAS(address_as_ull, assumed.W, fresh.W); } while (assumed.W != old.W); if (!misaligned) return old.B.H; else return old.B.L; } template <typename T> static inline __device__ T internal_16bit_atomicAdd(T* address, T val) { size_t shift = ((size_t)address & 2); int *base_address = (int *)((char*)address - shift); union I16PAIR { struct { T H; T L; } B; int W; __host__ __device__ I16PAIR() {}; __host__ __device__ ~I16PAIR() {}; }; I16PAIR pairNew, pairOld, pairAssumed; if (reinterpret_cast<int*>(address) == base_address) { pairOld.B.L = val; do { pairNew.B.L = pairOld.B.L; pairNew.B.H = pairOld.B.H + val; pairAssumed.W = pairOld.W; pairOld.W = atomicCAS(base_address, pairAssumed.W, pairNew.W); } while (pairAssumed.W != pairOld.W); return (T) pairOld.B.H; } else { pairOld.B.H = val; do { pairNew.B.H = pairOld.B.H; pairNew.B.L = pairOld.B.L + val; pairAssumed.W = pairOld.W; pairOld.W = atomicCAS(base_address, pairAssumed.W, pairNew.W); } while (pairAssumed.W != pairOld.W); return (T) pairOld.B.L; } } template <> inline __device__ int16_t nd4j_atomicAdd<int16_t>(int16_t* address, int16_t val) { return internal_16bit_atomicAdd<int16_t>(address, val); } template <> inline __device__ uint16_t nd4j_atomicAdd<uint16_t>(uint16_t* address, uint16_t val) { return internal_16bit_atomicAdd<uint16_t>(address, val); } template <> inline __device__ int8_t nd4j_atomicAdd<int8_t>(int8_t* address, int8_t val) { int res = *address; atomicAdd(&res, (int)val); *address = res; return *address; } template <> inline __device__ uint8_t nd4j_atomicAdd<uint8_t>(uint8_t* address, uint8_t val) { int res = *address; atomicAdd(&res, (int)val); *address = res; return *address; } template <> inline __device__ bool nd4j_atomicAdd<bool>(bool* address, bool val) { *address += (val); return *address; } template <> inline __device__ double nd4j_atomicSub<double>(double* address, double val) { return nd4j_atomicAdd<double>(address, -val); } template <> inline __device__ double nd4j_atomicMul<double>(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*) address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed,__double_as_longlong(val * __longlong_as_double(assumed))); } while (assumed != old); return __longlong_as_double(old); } template <> inline __device__ double nd4j_atomicDiv<double>(double* address, double val) { return nd4j_atomicMul<double>(address, 1./val); } template <> inline __device__ float nd4j_atomicAdd<float>(float* address, float val) { return atomicAdd(address,val); } //template <> //inline __device__ int nd4j_atomicAdd<int>(int* address, int val) { // return atomicAdd(address, val); //} template <> inline __device__ int32_t nd4j_atomicAdd<int32_t>(int32_t* address, int32_t val) { return (int32_t)atomicAdd((int*)address, (int)val); } template <> inline __device__ float nd4j_atomicSub<float>(float* address, float val) { return nd4j_atomicAdd<float>(address, -val); } template <> inline __device__ float16 nd4j_atomicSub<float16>(float16* address, float16 val) { return nd4j_atomicAdd<float16>(address, -val); } template <> inline __device__ bfloat16 nd4j_atomicSub<bfloat16>(bfloat16* address, bfloat16 val) { return nd4j_atomicAdd<bfloat16>(address, -val); } template <> inline __device__ float nd4j_atomicMul<float>(float* address, float val) { int* address_as_ull = 
( int*)address; int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __float_as_int(val * __int_as_float(assumed))); } while (assumed != old); return __int_as_float(old); } template <> inline __device__ int8_t nd4j_atomicMul<int8_t>(int8_t* address, int8_t val) { unsigned int *base_address = (unsigned int *)((size_t)address & ~3); unsigned int selectors[] = {0x3214, 0x3240, 0x3410, 0x4210}; unsigned int sel = selectors[(size_t)address & 3]; unsigned int old, assumed, mul, new_; old = *base_address; do { assumed = old; mul = val * (int8_t)__byte_perm(old, 0, ((size_t)address & 3) | 0x4440); new_ = __byte_perm(old, mul, sel); if (new_ == old) break; old = atomicCAS(base_address, assumed, new_); } while (assumed != old); return (int8_t)old; } template <> inline __device__ unsigned char nd4j_atomicMul<unsigned char>(unsigned char* address, unsigned char val) { unsigned int *base_address = (unsigned int *)((size_t)address & ~3); unsigned int selectors[] = {0x3214, 0x3240, 0x3410, 0x4210}; unsigned int sel = selectors[(size_t)address & 3]; unsigned int old, assumed, mul, new_; old = *base_address; do { assumed = old; mul = val * (uint8_t)__byte_perm(old, 0, ((size_t)address & 3) | 0x4440); new_ = __byte_perm(old, mul, sel); if (new_ == old) break; old = atomicCAS(base_address, assumed, new_); } while (assumed != old); return (uint8_t)old; } template <typename T> static inline __device__ T internal_16bit_atomicMul(T* address, T val) { size_t shift = ((size_t)address & 2); int *base_address = (int *)((char*)address - shift); union I16PAIR { struct { T H; T L; } B; int W; __host__ __device__ I16PAIR() {}; __host__ __device__ ~I16PAIR() {}; }; I16PAIR pairNew, pairOld, pairAssumed; if (reinterpret_cast<int*>(address) == base_address) { pairOld.B.L = val; do { pairNew.B.L = pairOld.B.L; pairNew.B.H = pairOld.B.H * val; pairAssumed.W = pairOld.W; pairOld.W = atomicCAS(base_address, pairAssumed.W, pairNew.W); } while (pairAssumed.W != pairOld.W); return (T) pairOld.B.H; } else { pairOld.B.H = val; do { pairNew.B.H = pairOld.B.H; pairNew.B.L = pairOld.B.L * val; pairAssumed.W = pairOld.W; pairOld.W = atomicCAS(base_address, pairAssumed.W, pairNew.W); } while (pairAssumed.W != pairOld.W); return (T) pairOld.B.L; } } template <> inline __device__ int16_t nd4j_atomicMul<int16_t>(int16_t* address, int16_t val) { return internal_16bit_atomicMul<int16_t>(address, val); } template <> inline __device__ uint16_t nd4j_atomicMul<uint16_t>(uint16_t* address, uint16_t val) { return internal_16bit_atomicMul<uint16_t>(address, val); } template <> inline __device__ int nd4j_atomicMul<int>(int* address, int val) { int* res_address = address; int old = *res_address, assumed; do { assumed = old; old = atomicCAS(res_address, assumed, val * assumed); } while (assumed != old); return old; } template <> inline __device__ unsigned int nd4j_atomicMul<unsigned int>(unsigned int* address, unsigned int val) { unsigned int* res_address = address; unsigned int old = *res_address, assumed; do { assumed = old; old = atomicCAS(res_address, assumed, val * assumed); } while (assumed != old); return old; } template <> inline __device__ int64_t nd4j_atomicMul<int64_t>(int64_t* address, int64_t val) { unsigned long long int* res_address = (unsigned long long int*)address; unsigned long long int old = *res_address, assumed; do { assumed = old; old = atomicCAS(res_address, assumed, val * assumed); } while (assumed != old); return (int64_t)old; } template <> inline __device__ uint64_t 
nd4j_atomicMul<uint64_t>(uint64_t* address, uint64_t val) { unsigned long long int* res_address = (unsigned long long int*)address; unsigned long long int old = *res_address, assumed; do { assumed = old; old = atomicCAS(res_address, assumed, val * assumed); } while (assumed != old); return (uint64_t)old; } #if !defined(_WIN32) && !defined(_WIN64) template <> inline __device__ Nd4jLong nd4j_atomicMul<Nd4jLong>(Nd4jLong* address, Nd4jLong val) { unsigned long long int* res_address = (unsigned long long*)address; unsigned long long int old = *res_address, assumed; do { assumed = old; old = atomicCAS(res_address, assumed, val * assumed); } while (assumed != old); return (Nd4jLong)old; } #endif template <> inline __device__ bfloat16 nd4j_atomicMul<bfloat16>(bfloat16* address, bfloat16 val) { return internal_16bit_atomicMul<bfloat16>(address, val); } template <> inline __device__ float16 nd4j_atomicMul<float16>(float16* address, float16 val) { return internal_16bit_atomicMul<float16>(address, val); } template <> inline __device__ float nd4j_atomicDiv<float>(float* address, float val) { return nd4j_atomicMul<float>(address, 1.f / val); } template <> inline __device__ float16 nd4j_atomicDiv<float16>(float16* address, float16 val) { return internal_16bit_atomicMul<float16>(address, (float16) 1.f / val); } template <> inline __device__ bfloat16 nd4j_atomicDiv<bfloat16>(bfloat16* address, bfloat16 val) { return internal_16bit_atomicMul<bfloat16>(address, (bfloat16) 1 / val); } } #endif } } #ifdef _OPENMP #ifndef MAX_FLOAT #define MAX_FLOAT 1e37 #endif #pragma omp declare reduction(maxTF : float,double,float16,bfloat16 : \ omp_out = sd::math::nd4j_max(omp_in, omp_out) )\ initializer (omp_priv=-MAX_FLOAT) #pragma omp declare reduction(minTF : float,double,float16,bfloat16 : \ omp_out = sd::math::nd4j_min(omp_in, omp_out) )\ initializer (omp_priv=MAX_FLOAT) #pragma omp declare reduction(maxT : float,double,float16,bfloat16,int,Nd4jLong,Nd4jULong,int8_t,uint8_t,bool,int16_t,uint16_t,uint32_t : \ omp_out = sd::math::nd4j_max(omp_in, omp_out) )\ initializer (omp_priv=0) #pragma omp declare reduction(minT : float,double,float16,bfloat16,int,Nd4jLong,Nd4jULong,int8_t,uint8_t,bool,int16_t,uint16_t,uint32_t : \ omp_out = sd::math::nd4j_min(omp_in, omp_out) )\ initializer (omp_priv=0) #pragma omp declare reduction(amaxT : float,double,float16,bfloat16,int,Nd4jLong,Nd4jULong,int8_t,uint8_t,bool,int16_t,uint16_t,uint32_t : \ omp_out = sd::math::nd4j_max(sd::math::nd4j_abs(omp_in), sd::math::nd4j_abs(omp_out)) ) #pragma omp declare reduction(aminT : float,double,float16,bfloat16,int,Nd4jLong,Nd4jULong,int8_t,uint8_t,bool,int16_t,uint16_t,uint32_t : \ omp_out = sd::math::nd4j_min(sd::math::nd4j_abs(omp_in), sd::math::nd4j_abs(omp_out)) ) #pragma omp declare reduction(asumT : float,double,float16,bfloat16,int,Nd4jLong,Nd4jULong,int8_t,uint8_t,bool,int16_t,uint16_t,uint32_t : \ omp_out = sd::math::nd4j_abs(omp_in) + sd::math::nd4j_abs(omp_out))\ initializer (omp_priv=0) #pragma omp declare reduction(sumT : float,double,float16,bfloat16,int,Nd4jLong,Nd4jULong,int8_t,uint8_t,bool,int16_t,uint16_t,uint32_t : \ omp_out = omp_in + omp_out)\ initializer (omp_priv=0) #pragma omp declare reduction(prodT : float,double,float16,bfloat16,int,Nd4jLong,Nd4jULong,int8_t,uint8_t,bool,int16_t,uint16_t,uint32_t : \ omp_out = omp_in * omp_out)\ initializer (omp_priv=1) #endif #endif /* TEMPLATEMATH_H_ */
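The internal_16bit_atomicAdd / internal_16bit_atomicMul helpers above all follow one pattern: there is no native 16-bit atomic, so the update is done by CAS-looping on the aligned 32-bit word that contains the target half. (Note that the int8_t/uint8_t nd4j_atomicAdd specializations above read-modify-write through a local temporary and are therefore not actually atomic, unlike the __byte_perm-based CAS loop used by nd4j_atomicMul<int8_t>.) Below is a minimal host-side C++ sketch of the same word-CAS idea, with std::atomic standing in for CUDA's atomicCAS; all names are illustrative, not the library's API.

#include <atomic>
#include <cstdint>
#include <cstdio>

// Add `val` to one 16-bit half of a 32-bit word, atomically, by CAS on the word.
// half == 0 selects the low 16 bits, half == 1 the high 16 bits.
static uint16_t atomic_add_u16(std::atomic<uint32_t>& word, int half, uint16_t val)
{
    uint32_t old_word = word.load(std::memory_order_relaxed);
    uint32_t new_word;
    uint16_t old_half;
    do {
        old_half = (uint16_t)(old_word >> (16 * half));
        uint16_t new_half = (uint16_t)(old_half + val);
        uint32_t mask = 0xFFFFu << (16 * half);
        new_word = (old_word & ~mask) | ((uint32_t)new_half << (16 * half));
        // On failure compare_exchange_weak reloads old_word, like re-reading atomicCAS's return.
    } while (!word.compare_exchange_weak(old_word, new_word));
    return old_half;  // value seen before this thread's update, as atomicAdd returns
}

int main()
{
    std::atomic<uint32_t> packed{0};   // two uint16_t counters packed into one word
#pragma omp parallel for               // hammer both halves concurrently (compile with -fopenmp)
    for (int i = 0; i < 1000; ++i)
        atomic_add_u16(packed, i & 1, 1);
    uint32_t w = packed.load();
    printf("low=%u high=%u\n", (unsigned)(w & 0xFFFFu), (unsigned)(w >> 16));  // low=500 high=500
    return 0;
}

The declare-reduction pragmas at the end of the header tell OpenMP how to combine thread-private partial results for types that have no built-in reduction. A minimal standalone sketch of the same mechanism (custom identifier, combiner, and omp_priv initializer; a simplified double-only variant of the maxT pragma above):

#include <cstdio>
#include <algorithm>

// Custom reduction: combiner takes the max, each thread's private copy starts at 0,
// matching the initializer(omp_priv=0) convention used in the pragmas above.
#pragma omp declare reduction(maxT : double : \
        omp_out = std::max(omp_in, omp_out)) initializer(omp_priv = 0)

int main()
{
    double m = 0;
#pragma omp parallel for reduction(maxT : m)
    for (int i = 0; i < 1000; ++i)
        m = std::max(m, (double)((i * 37) % 1000));
    printf("max = %g\n", m);  // 999: gcd(37,1000)=1, so all residues 0..999 occur
    return 0;
}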
8259.c
/* POLYBENCH/GPU-OPENMP
 *
 * This file is a part of the Polybench/GPU-OpenMP suite
 *
 * Contact:
 * William Killian <killian@udel.edu>
 *
 * Copyright 2013, The University of Delaware
 */
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>

/* Include polybench common header. */
#include <polybench.h>

/* Include benchmark-specific header. */
/* Default data type is double, default size is 4096x4096. */
#include "convolution-2d.h"

/* Array initialization. */
static void init_array (int ni, int nj,
                        DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj))
{
  // printf("Initializing Array\n");
  int i, j;
  for (i = 0; i < ni; i++)
    for (j = 0; j < nj; j++)
      {
        A[i][j] = ((DATA_TYPE) (i + j) / nj);
      }
}

/* DCE code. Must scan the entire live-out data.
   Can also be used to check the correctness of the output. */
static void print_array(int ni, int nj,
                        DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
  int i, j;
  for (i = 0; i < ni; i++)
    for (j = 0; j < nj; j++)
      {
        fprintf(stderr, DATA_PRINTF_MODIFIER, B[i][j]);
        if ((i * NJ + j) % 20 == 0) fprintf(stderr, "\n");
      }
  fprintf(stderr, "\n");
}

/* Main computational kernel. The whole function will be timed,
   including the call and return. */
static void kernel_conv2d(int ni, int nj,
                          DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj),
                          DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
  int i, j;
#pragma scop
#pragma omp parallel for simd schedule(dynamic, 28) num_threads(28) private(j)
  for (i = 1; i < _PB_NI - 1; ++i)
    {
      for (j = 1; j < _PB_NJ - 1; ++j)
        {
          B[i][j] = 0.2 * A[i-1][j-1] + 0.5 * A[i-1][j] + -0.8 * A[i-1][j+1]
                  + -0.3 * A[ i ][j-1] + 0.6 * A[ i ][j] + -0.9 * A[ i ][j+1]
                  + 0.4 * A[i+1][j-1] + 0.7 * A[i+1][j] + 0.1 * A[i+1][j+1];
        }
    }
#pragma endscop
  // printf("Kernel computation complete!!\n");
}

int main(int argc, char** argv)
{
  /* Retrieve problem size. */
  int ni = NI;
  int nj = NJ;

  /* Variable declaration/allocation. */
  POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NJ, ni, nj);
  POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NI, NJ, ni, nj);

  /* Initialize array(s). */
  init_array (ni, nj, POLYBENCH_ARRAY(A));

  /* Start timer. */
  //polybench_start_instruments;
  polybench_timer_start();

  /* Run kernel. */
  kernel_conv2d (ni, nj, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B));

  /* Stop and print timer. */
  polybench_timer_stop();
  polybench_timer_print();
  //polybench_stop_instruments;
  //polybench_print_instruments;

  /* Prevent dead-code elimination. All live-out data must be printed
     by the function call in argument. */
  polybench_prevent_dce(print_array(ni, nj, POLYBENCH_ARRAY(B)));

  /* Be clean. */
  POLYBENCH_FREE_ARRAY(A);
  POLYBENCH_FREE_ARRAY(B);

  return 0;
}
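The pragma above combines parallel for, simd, and a dynamic schedule, which is safe only because the stencil is race-free: every B[i][j] reads from A alone, so rows may be computed in any order. A small self-contained sketch (assumed 64x64 size, no polybench macros) that checks the parallel loop against a serial reference:

#include <cstdio>
#include <cmath>
#include <vector>

int main()
{
    const int NI = 64, NJ = 64;
    std::vector<double> A(NI * NJ), Bs(NI * NJ, 0.0), Bp(NI * NJ, 0.0);
    for (int i = 0; i < NI; ++i)
        for (int j = 0; j < NJ; ++j)
            A[i * NJ + j] = (double)(i + j) / NJ;   // same init as init_array above

    // The same 3x3 stencil coefficients as kernel_conv2d.
    auto stencil = [&](std::vector<double>& B, int i, int j) {
        B[i * NJ + j] = 0.2 * A[(i-1) * NJ + j-1] + 0.5 * A[(i-1) * NJ + j] - 0.8 * A[(i-1) * NJ + j+1]
                      - 0.3 * A[ i    * NJ + j-1] + 0.6 * A[ i    * NJ + j] - 0.9 * A[ i    * NJ + j+1]
                      + 0.4 * A[(i+1) * NJ + j-1] + 0.7 * A[(i+1) * NJ + j] + 0.1 * A[(i+1) * NJ + j+1];
    };

    for (int i = 1; i < NI - 1; ++i)            // serial reference
        for (int j = 1; j < NJ - 1; ++j)
            stencil(Bs, i, j);

#pragma omp parallel for schedule(dynamic, 28)  // parallel version: rows are independent
    for (int i = 1; i < NI - 1; ++i)
        for (int j = 1; j < NJ - 1; ++j)
            stencil(Bp, i, j);

    double maxdiff = 0.0;
    for (int k = 0; k < NI * NJ; ++k)
        maxdiff = std::fmax(maxdiff, std::fabs(Bs[k] - Bp[k]));
    printf("max |serial - parallel| = %g\n", maxdiff);  // expected: 0
    return 0;
}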
pcs.c
/** @file pcs.c * @brief Functions relative to the Parallel Collision Search algorithm. * * Created by Monika Trimoska on 03/12/2015. * Copyright © 2015 Monika Trimoska. All rights reserved. */ #include <stdio.h> #include <stdint.h> #include <math.h> #include <gmp.h> #include <omp.h> #include <stdlib.h> #include <time.h> #include <string.h> #include <sys/time.h> #include "pcs_elliptic_curve_operations.h" #include "pcs_pollard_rho.h" #include "pcs_storage.h" #include "pcs.h" elliptic_curve_t E; point_t P; point_t Q; mpz_t n; mpz_t *A; mpz_t *B; point_t M[__NB_ENSEMBLES__]; uint8_t trailling_bits; uint8_t nb_bits; /** Determines whether a point is a distinguished one. * * @param[in] R A point on an elliptic curve. * @param[in] trailling_bits Number of trailling zero bits in a ditinguished point. * @param[out] q The x-coordinate, without the trailling zeros. * @return 1 if the point is distinguished, 0 otherwise. */ int is_distinguished(point_t R, int trailling_bits, mpz_t *q) { int res; mpz_t r; mpz_inits(r, NULL); mpz_tdiv_qr_ui(*q, r, R.x, (unsigned long int)pow(2, trailling_bits)); res=(mpz_sgn(r) == 0); mpz_clears(r, NULL); return (res); } /** Checks if the linear combination aP+bQ is equal to R or its inverse. * * @param[in] R A point on an elliptic curve. * @param[in] a a coefficient. * @param[in] b b coefficient. * @return 1 if aP+bQ = R, 0 if aP+bQ = -R. */ int same_point(point_t R, mpz_t a, mpz_t b) { int res; point_t *S1, *S2, *S; if(!preallocation_init_done) { preallocation_init(); } S1 = &(temp_point[omp_get_thread_num()][1]); S2 = &(temp_point[omp_get_thread_num()][2]); S = &(temp_point[omp_get_thread_num()][3]); double_and_add(S1, P, a, E); double_and_add(S2, Q, b, E); add(S, *S1, *S2, E); res=(mpz_cmp(R.y, S->y) == 0); return res; } /** Computes the linear combination aP+bQ on E. * * @param[out] R Resulting point. * @param[in] a a coefficient. * @param[in] b b coefficient. */ void lin_comb(point_t * R, mpz_t a, mpz_t b) { point_t *S1, *S2; if(!preallocation_init_done) { preallocation_init(); } S1 = &(temp_point[omp_get_thread_num()][1]); S2 = &(temp_point[omp_get_thread_num()][2]); double_and_add(S1, P, a, E); double_and_add(S2, Q, b, E); add(R, *S1, *S2, E); } /** Checks if there is a collision. * */ int is_collision(mpz_t x, mpz_t a1, mpz_t a2, int trailling_bits) { uint8_t r; mpz_t *xDist_; int retval = 0; mpz_t *b1, *b2; point_t *R; if(!preallocation_init_done) { preallocation_init(); } b1 = &(temp_obj[omp_get_thread_num()][9]); b2 = &(temp_obj[omp_get_thread_num()][10]); xDist_ = &(temp_obj[omp_get_thread_num()][11]); R = &(temp_point[omp_get_thread_num()][4]); mpz_set_ui(*b2, 0); mpz_set_ui(*b1, 0); double_and_add(R, P, a1, E); //recompute first a,b pair while(!is_distinguished(*R, trailling_bits, xDist_)) { r = hash(R->y); compute_a(a1, A[r], n); compute_b(*b1, B[r], n); f(*R, M[r], R, E); } //recompute second a,b pair double_and_add(R, P, a2, E); while(!is_distinguished(*R, trailling_bits, xDist_)) { r = hash(R->y); compute_a(a2, A[r], n); compute_b(*b2, B[r], n); f(*R, M[r], R, E); } if(mpz_cmp(*b1, *b2) != 0) //we found two different pairs, so collision { if(!same_point(*R, a1, *b1)) //it's the inverse point { mpz_neg(a2, a2); mpz_mmod(a2, a2, n); mpz_neg(*b2, *b2); mpz_mmod(*b2, *b2, n); } compute_x(x, a1, a2, *b1, *b2, n); retval = 1; } return retval; } /** Initialize all variables needed to do a PCS algorithm. 
* */ void pcs_init(point_t P_init, point_t Q_init, elliptic_curve_t E_init, mpz_t n_init, mpz_t *A_init, mpz_t *B_init, uint8_t nb_bits_init, uint8_t trailling_bits_init, int type_struct, int nb_threads, uint8_t level) { uint8_t i; point_init(&P); point_init(&Q); curve_init(&E); mpz_init(n); mpz_set(P.x, P_init.x); mpz_set(P.y, P_init.y); mpz_set(P.z, P_init.z); mpz_set(Q.x, Q_init.x); mpz_set(Q.y, Q_init.y); mpz_set(Q.z, Q_init.z); mpz_set(E.A, E_init.A); mpz_set(E.B, E_init.B); mpz_set(E.p, E_init.p); mpz_set(n, n_init); A = A_init; B = B_init; for(i=0; i<__NB_ENSEMBLES__; i++) { mpz_inits(M[i].x,M[i].y,M[i].z,NULL); lin_comb(&M[i],A[i],B[i]); } trailling_bits = trailling_bits_init; nb_bits = nb_bits_init; struct_init(type_struct, n, trailling_bits, nb_bits, nb_threads, level); } /** Run the PCS algorithm. * */ long long int pcs_run(mpz_t x_res, int nb_threads, int nb_collisions) { point_t R; mpz_t a, a2; mpz_t x, xDist; uint8_t r; int trail_length; int trail_length_max = pow(2, trailling_bits) * 20; int collision_count = 0; char xDist_str[50]; #pragma omp parallel private(R, a, a2, x, r, xDist, xDist_str, trail_length) shared(collision_count, x_res, trail_length_max) num_threads(nb_threads) { point_init(&R); mpz_inits(x, a2, a, xDist, NULL); //Initialize a starting point gmp_randstate_t r_state; gmp_randinit_default(r_state); gmp_randseed_ui(r_state, time(NULL) * (omp_get_thread_num() + 1)); mpz_urandomb(a, r_state, nb_bits); double_and_add(&R, P, a, E); trail_length = 0; while(collision_count < nb_collisions) { if(is_distinguished(R, trailling_bits, &xDist)) { if(struct_add(a2, a, xDist, xDist_str)) { if(is_collision(x, a, a2, trailling_bits)) { #pragma omp critical { collision_count++; mpz_set(x_res, x); } } } mpz_urandomb(a, r_state, nb_bits); double_and_add(&R, P, a, E); trail_length = 0; } else { r=hash(R.y); f(R, M[r], &R, E); trail_length++; if(trail_length > trail_length_max) { mpz_urandomb(a, r_state, nb_bits); double_and_add(&R, P, a, E); trail_length = 0; } } } point_clear(&R); mpz_clears(a, a2, x, xDist, NULL); gmp_randclear(r_state); } return 0; } /** Free all variables used in the previous PCS run. * */ void pcs_clear() { uint8_t i; point_clear(&P); point_clear(&Q); curve_clear(&E); mpz_clear(n); for(i = 0; i < __NB_ENSEMBLES__; i++) { mpz_clears(M[i].x, M[i].y, M[i].z, NULL); } struct_free(); }
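The storage and runtime of the parallel collision search above are governed by is_distinguished: a walk only writes to the shared structure when the current x-coordinate ends in `trailling_bits` zero bits, so with t trailing bits roughly one point in 2^t is stored and the expected trail length between stored points is about 2^t steps. Here is a sketch of the same test on plain 64-bit values instead of GMP integers (names illustrative):

#include <cstdint>
#include <cstdio>

// A value is "distinguished" when it has `trailing_bits` trailing zero bits;
// the stored key is the value with those bits removed, exactly the quotient
// that mpz_tdiv_qr_ui computes in is_distinguished above.
static bool is_distinguished_u64(uint64_t x, int trailing_bits, uint64_t* key)
{
    uint64_t mask = (1ull << trailing_bits) - 1;
    if ((x & mask) != 0) return false;   // remainder nonzero: not distinguished
    *key = x >> trailing_bits;           // quotient: x without the trailing zeros
    return true;
}

int main()
{
    uint64_t key;
    // With t = 8, roughly one value in 2^8 = 256 is distinguished.
    printf("%d\n", is_distinguished_u64(0x1200, 8, &key));  // 1, key = 0x12
    printf("%d\n", is_distinguished_u64(0x1201, 8, &key));  // 0
    return 0;
}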
text_parser.h
/*!
 * Copyright (c) 2015 by Contributors
 * \file text_parser.h
 * \brief iterator parser to parse text format
 * \author Tianqi Chen
 */
#ifndef DMLC_DATA_TEXT_PARSER_H_
#define DMLC_DATA_TEXT_PARSER_H_

#include <dmlc/data.h>
#include <dmlc/omp.h>
#include <vector>
#include <cstring>
#include <algorithm>
#include "./row_block.h"
#include "./parser.h"

namespace dmlc {
namespace data {
/*!
 * \brief Text parser that parses the input lines
 * and returns rows in input data
 */
template <typename IndexType>
class TextParserBase : public ParserImpl<IndexType> {
 public:
  explicit TextParserBase(InputSplit *source, int nthread)
      : bytes_read_(0), source_(source) {
    int maxthread;
    #pragma omp parallel
    {
      maxthread = std::max(omp_get_num_procs() / 2 - 4, 1);
    }
    nthread_ = std::min(maxthread, nthread);
  }
  virtual ~TextParserBase() {
    delete source_;
  }
  virtual void BeforeFirst(void) {
    source_->BeforeFirst();
  }
  virtual size_t BytesRead(void) const {
    return bytes_read_;
  }
  virtual bool ParseNext(std::vector<RowBlockContainer<IndexType> > *data) {
    return FillData(data);
  }

 protected:
  /*!
   * \brief parse data into out
   * \param begin beginning of buffer
   * \param end end of buffer
   */
  virtual void ParseBlock(char *begin, char *end,
                          RowBlockContainer<IndexType> *out) = 0;
  /*!
   * \brief read in next several blocks of data
   * \param data vector of data to be returned
   * \return true if the data is loaded, false if reach end
   */
  inline bool FillData(std::vector<RowBlockContainer<IndexType> > *data);
  /*!
   * \brief start from bptr, go backward and find the first end-of-line
   * \param bptr end position to go backward
   * \param begin the beginning position of buffer
   * \return position of the first end-of-line going backward
   */
  inline char* BackFindEndLine(char *bptr, char *begin) {
    for (; bptr != begin; --bptr) {
      if (*bptr == '\n' || *bptr == '\r') return bptr;
    }
    return begin;
  }

 private:
  // nthread
  int nthread_;
  // number of bytes read
  size_t bytes_read_;
  // source split that provides the data
  InputSplit *source_;
};

// implementation
template <typename IndexType>
inline bool TextParserBase<IndexType>::
FillData(std::vector<RowBlockContainer<IndexType> > *data) {
  InputSplit::Blob chunk;
  if (!source_->NextChunk(&chunk)) return false;
  const int nthread = omp_get_max_threads();
  // reserve space for data
  data->resize(nthread);
  bytes_read_ += chunk.size;
  CHECK_NE(chunk.size, 0U);
  char *head = reinterpret_cast<char*>(chunk.dptr);
  #pragma omp parallel num_threads(nthread)
  {
    // threadid
    int tid = omp_get_thread_num();
    size_t nstep = (chunk.size + nthread - 1) / nthread;
    size_t sbegin = std::min(tid * nstep, chunk.size);
    size_t send = std::min((tid + 1) * nstep, chunk.size);
    char *pbegin = BackFindEndLine(head + sbegin, head);
    char *pend;
    if (tid + 1 == nthread) {
      pend = head + send;
    } else {
      pend = BackFindEndLine(head + send, head);
    }
    ParseBlock(pbegin, pend, &(*data)[tid]);
  }
  this->data_ptr_ = 0;
  return true;
}
}  // namespace data
}  // namespace dmlc
#endif  // DMLC_DATA_TEXT_PARSER_H_
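The key idea in FillData is that threads split the chunk by bytes, not by lines, and then each range boundary is snapped backward to the previous newline with BackFindEndLine, so no line is ever split between two threads. A standalone sketch of that splitting rule, here just counting lines per range (assumed 3 threads and a small CSV buffer):

#include <omp.h>
#include <cstdio>
#include <cstring>
#include <algorithm>

// Snap a pointer backward to the previous '\n' (or the buffer start).
static char* back_find_eol(char* p, char* begin)
{
    for (; p != begin; --p)
        if (*p == '\n') return p;
    return begin;
}

int main()
{
    char buf[] = "a,1\nbb,22\nccc,333\ndddd,4444\neeeee,55555\n";
    const size_t size = strlen(buf);
    const int nthread = 3;
    int lines[nthread] = {0};

#pragma omp parallel num_threads(nthread)
    {
        int tid = omp_get_thread_num();
        // Byte range [tid*nstep, (tid+1)*nstep), then both ends snapped back
        // to a newline, exactly as in FillData above.
        size_t nstep = (size + nthread - 1) / nthread;
        size_t sbegin = std::min(tid * nstep, size);
        size_t send = std::min((tid + 1) * nstep, size);
        char* pbegin = back_find_eol(buf + sbegin, buf);
        char* pend = (tid + 1 == nthread) ? buf + size : back_find_eol(buf + send, buf);
        for (char* p = pbegin; p != pend; ++p)
            if (*p == '\n') ++lines[tid];
    }

    int total = 0;
    for (int t = 0; t < nthread; ++t) total += lines[t];
    printf("total lines = %d\n", total);  // 5: each newline is counted by exactly one thread
    return 0;
}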
fft5d.c
/* * This file is part of the GROMACS molecular simulation package. * * Copyright (c) 1991-2000, University of Groningen, The Netherlands. * Copyright (c) 2001-2012, The GROMACS development team, * check out http://www.gromacs.org for more information. * Copyright (c) 2012,2013, by the GROMACS development team, led by * David van der Spoel, Berk Hess, Erik Lindahl, and including many * others, as listed in the AUTHORS file in the top-level source * directory and at http://www.gromacs.org. * * GROMACS is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public License * as published by the Free Software Foundation; either version 2.1 * of the License, or (at your option) any later version. * * GROMACS is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with GROMACS; if not, see * http://www.gnu.org/licenses, or write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * * If you want to redistribute modifications to GROMACS, please * consider that scientific software is very special. Version * control is crucial - bugs must be traceable. We will be happy to * consider code for inclusion in the official distribution, but * derived work must not be called official GROMACS. Details are found * in the README & COPYING files - if they are missing, get the * official version at http://www.gromacs.org. * * To help us fund GROMACS development, we humbly ask that you cite * the research papers on the package. Check out http://www.gromacs.org. */ #ifdef HAVE_CONFIG_H #include <config.h> #endif #include <stdio.h> #include <stdlib.h> #include <string.h> #ifdef NOGMX #define GMX_PARALLEL_ENV_INITIALIZED 1 #else #ifdef GMX_MPI #define GMX_PARALLEL_ENV_INITIALIZED 1 #else #define GMX_PARALLEL_ENV_INITIALIZED 0 #endif #endif #ifdef GMX_LIB_MPI #include <mpi.h> #endif #ifdef GMX_THREAD_MPI #include "tmpi.h" #endif #ifdef GMX_OPENMP /* TODO: Do we still need this? Are we still planning ot use fftw + OpenMP? */ #define FFT5D_THREADS /* requires fftw compiled with openmp */ /* #define FFT5D_FFTW_THREADS (now set by cmake) */ #endif #include "fft5d.h" #include <float.h> #include <math.h> #include <assert.h> #include "smalloc.h" #ifndef __FLT_EPSILON__ #define __FLT_EPSILON__ FLT_EPSILON #define __DBL_EPSILON__ DBL_EPSILON #endif #ifdef NOGMX FILE* debug = 0; #endif #include "gmx_fatal.h" #ifdef GMX_FFT_FFTW3 #ifdef GMX_THREAD_MPI /* none of the fftw3 calls, except execute(), are thread-safe, so we need to serialize them with this mutex. */ static tMPI_Thread_mutex_t big_fftw_mutex = TMPI_THREAD_MUTEX_INITIALIZER; #define FFTW_LOCK tMPI_Thread_mutex_lock(&big_fftw_mutex) #define FFTW_UNLOCK tMPI_Thread_mutex_unlock(&big_fftw_mutex) #else /* GMX_THREAD_MPI */ #define FFTW_LOCK #define FFTW_UNLOCK #endif /* GMX_THREAD_MPI */ #endif /* GMX_FFT_FFTW3 */ static double fft5d_fmax(double a, double b) { return (a > b) ? 
a : b; } /* largest factor smaller than sqrt */ static int lfactor(int z) { int i; for (i = sqrt(z);; i--) { if (z%i == 0) { return i; } } return 1; } /* largest factor */ static int l2factor(int z) { int i; if (z == 1) { return 1; } for (i = z/2;; i--) { if (z%i == 0) { return i; } } return 1; } /* largest prime factor: WARNING: slow recursion, only use for small numbers */ static int lpfactor(int z) { int f = l2factor(z); if (f == 1) { return z; } return fft5d_fmax(lpfactor(f), lpfactor(z/f)); } #ifndef GMX_MPI #ifdef HAVE_GETTIMEOFDAY #include <sys/time.h> double MPI_Wtime() { struct timeval tv; gettimeofday(&tv, 0); return tv.tv_sec+tv.tv_usec*1e-6; } #else double MPI_Wtime() { return 0.0; } #endif #endif static int vmax(int* a, int s) { int i, max = 0; for (i = 0; i < s; i++) { if (a[i] > max) { max = a[i]; } } return max; } /* NxMxK the size of the data * comm communicator to use for fft5d * P0 number of processor in 1st axes (can be null for automatic) * lin is allocated by fft5d because size of array is only known after planning phase * rlout2 is only used as intermediate buffer - only returned after allocation to reuse for back transform - should not be used by caller */ fft5d_plan fft5d_plan_3d(int NG, int MG, int KG, MPI_Comm comm[2], int flags, t_complex** rlin, t_complex** rlout, t_complex** rlout2, t_complex** rlout3, int nthreads) { int P[2], bMaster, prank[2], i, t; int rNG, rMG, rKG; int *N0 = 0, *N1 = 0, *M0 = 0, *M1 = 0, *K0 = 0, *K1 = 0, *oN0 = 0, *oN1 = 0, *oM0 = 0, *oM1 = 0, *oK0 = 0, *oK1 = 0; int N[3], M[3], K[3], pN[3], pM[3], pK[3], oM[3], oK[3], *iNin[3] = {0}, *oNin[3] = {0}, *iNout[3] = {0}, *oNout[3] = {0}; int C[3], rC[3], nP[2]; int lsize; t_complex *lin = 0, *lout = 0, *lout2 = 0, *lout3 = 0; fft5d_plan plan; int s; /* comm, prank and P are in the order of the decomposition (plan->cart is in the order of transposes) */ #ifdef GMX_MPI if (GMX_PARALLEL_ENV_INITIALIZED && comm[0] != MPI_COMM_NULL) { MPI_Comm_size(comm[0], &P[0]); MPI_Comm_rank(comm[0], &prank[0]); } else #endif { P[0] = 1; prank[0] = 0; } #ifdef GMX_MPI if (GMX_PARALLEL_ENV_INITIALIZED && comm[1] != MPI_COMM_NULL) { MPI_Comm_size(comm[1], &P[1]); MPI_Comm_rank(comm[1], &prank[1]); } else #endif { P[1] = 1; prank[1] = 0; } bMaster = (prank[0] == 0 && prank[1] == 0); if (debug) { fprintf(debug, "FFT5D: Using %dx%d processor grid, rank %d,%d\n", P[0], P[1], prank[0], prank[1]); } if (bMaster) { if (debug) { fprintf(debug, "FFT5D: N: %d, M: %d, K: %d, P: %dx%d, real2complex: %d, backward: %d, order yz: %d, debug %d\n", NG, MG, KG, P[0], P[1], (flags&FFT5D_REALCOMPLEX) > 0, (flags&FFT5D_BACKWARD) > 0, (flags&FFT5D_ORDER_YZ) > 0, (flags&FFT5D_DEBUG) > 0); } /* The check below is not correct, one prime factor 11 or 13 is ok. 
if (fft5d_fmax(fft5d_fmax(lpfactor(NG),lpfactor(MG)),lpfactor(KG))>7) { printf("WARNING: FFT very slow with prime factors larger 7\n"); printf("Change FFT size or in case you cannot change it look at\n"); printf("http://www.fftw.org/fftw3_doc/Generating-your-own-code.html\n"); } */ } if (NG == 0 || MG == 0 || KG == 0) { if (bMaster) { printf("FFT5D: FATAL: Datasize cannot be zero in any dimension\n"); } return 0; } rNG = NG; rMG = MG; rKG = KG; if (flags&FFT5D_REALCOMPLEX) { if (!(flags&FFT5D_BACKWARD)) { NG = NG/2+1; } else { if (!(flags&FFT5D_ORDER_YZ)) { MG = MG/2+1; } else { KG = KG/2+1; } } } /*for transpose we need to know the size for each processor not only our own size*/ N0 = (int*)malloc(P[0]*sizeof(int)); N1 = (int*)malloc(P[1]*sizeof(int)); M0 = (int*)malloc(P[0]*sizeof(int)); M1 = (int*)malloc(P[1]*sizeof(int)); K0 = (int*)malloc(P[0]*sizeof(int)); K1 = (int*)malloc(P[1]*sizeof(int)); oN0 = (int*)malloc(P[0]*sizeof(int)); oN1 = (int*)malloc(P[1]*sizeof(int)); oM0 = (int*)malloc(P[0]*sizeof(int)); oM1 = (int*)malloc(P[1]*sizeof(int)); oK0 = (int*)malloc(P[0]*sizeof(int)); oK1 = (int*)malloc(P[1]*sizeof(int)); for (i = 0; i < P[0]; i++) { #define EVENDIST #ifndef EVENDIST oN0[i] = i*ceil((double)NG/P[0]); oM0[i] = i*ceil((double)MG/P[0]); oK0[i] = i*ceil((double)KG/P[0]); #else oN0[i] = (NG*i)/P[0]; oM0[i] = (MG*i)/P[0]; oK0[i] = (KG*i)/P[0]; #endif } for (i = 0; i < P[1]; i++) { #ifndef EVENDIST oN1[i] = i*ceil((double)NG/P[1]); oM1[i] = i*ceil((double)MG/P[1]); oK1[i] = i*ceil((double)KG/P[1]); #else oN1[i] = (NG*i)/P[1]; oM1[i] = (MG*i)/P[1]; oK1[i] = (KG*i)/P[1]; #endif } for (i = 0; i < P[0]-1; i++) { N0[i] = oN0[i+1]-oN0[i]; M0[i] = oM0[i+1]-oM0[i]; K0[i] = oK0[i+1]-oK0[i]; } N0[P[0]-1] = NG-oN0[P[0]-1]; M0[P[0]-1] = MG-oM0[P[0]-1]; K0[P[0]-1] = KG-oK0[P[0]-1]; for (i = 0; i < P[1]-1; i++) { N1[i] = oN1[i+1]-oN1[i]; M1[i] = oM1[i+1]-oM1[i]; K1[i] = oK1[i+1]-oK1[i]; } N1[P[1]-1] = NG-oN1[P[1]-1]; M1[P[1]-1] = MG-oM1[P[1]-1]; K1[P[1]-1] = KG-oK1[P[1]-1]; /* for step 1-3 the local N,M,K sizes of the transposed system C: contiguous dimension, and nP: number of processor in subcommunicator for that step */ pM[0] = M0[prank[0]]; oM[0] = oM0[prank[0]]; pK[0] = K1[prank[1]]; oK[0] = oK1[prank[1]]; C[0] = NG; rC[0] = rNG; if (!(flags&FFT5D_ORDER_YZ)) { N[0] = vmax(N1, P[1]); M[0] = M0[prank[0]]; K[0] = vmax(K1, P[1]); pN[0] = N1[prank[1]]; iNout[0] = N1; oNout[0] = oN1; nP[0] = P[1]; C[1] = KG; rC[1] = rKG; N[1] = vmax(K0, P[0]); pN[1] = K0[prank[0]]; iNin[1] = K1; oNin[1] = oK1; iNout[1] = K0; oNout[1] = oK0; M[1] = vmax(M0, P[0]); pM[1] = M0[prank[0]]; oM[1] = oM0[prank[0]]; K[1] = N1[prank[1]]; pK[1] = N1[prank[1]]; oK[1] = oN1[prank[1]]; nP[1] = P[0]; C[2] = MG; rC[2] = rMG; iNin[2] = M0; oNin[2] = oM0; M[2] = vmax(K0, P[0]); pM[2] = K0[prank[0]]; oM[2] = oK0[prank[0]]; K[2] = vmax(N1, P[1]); pK[2] = N1[prank[1]]; oK[2] = oN1[prank[1]]; free(N0); free(oN0); /*these are not used for this order*/ free(M1); free(oM1); /*the rest is freed in destroy*/ } else { N[0] = vmax(N0, P[0]); M[0] = vmax(M0, P[0]); K[0] = K1[prank[1]]; pN[0] = N0[prank[0]]; iNout[0] = N0; oNout[0] = oN0; nP[0] = P[0]; C[1] = MG; rC[1] = rMG; N[1] = vmax(M1, P[1]); pN[1] = M1[prank[1]]; iNin[1] = M0; oNin[1] = oM0; iNout[1] = M1; oNout[1] = oM1; M[1] = N0[prank[0]]; pM[1] = N0[prank[0]]; oM[1] = oN0[prank[0]]; K[1] = vmax(K1, P[1]); pK[1] = K1[prank[1]]; oK[1] = oK1[prank[1]]; nP[1] = P[1]; C[2] = KG; rC[2] = rKG; iNin[2] = K1; oNin[2] = oK1; M[2] = vmax(N0, P[0]); pM[2] = N0[prank[0]]; oM[2] = oN0[prank[0]]; 
K[2] = vmax(M1, P[1]); pK[2] = M1[prank[1]]; oK[2] = oM1[prank[1]]; free(N1); free(oN1); /*these are not used for this order*/ free(K0); free(oK0); /*the rest is freed in destroy*/ } N[2] = pN[2] = -1; /*not used*/ /* Difference between x-y-z regarding 2d decomposition is whether they are distributed along axis 1, 2 or both */ /* int lsize = fmax(N[0]*M[0]*K[0]*nP[0],N[1]*M[1]*K[1]*nP[1]); */ lsize = fft5d_fmax(N[0]*M[0]*K[0]*nP[0], fft5d_fmax(N[1]*M[1]*K[1]*nP[1], C[2]*M[2]*K[2])); /* int lsize = fmax(C[0]*M[0]*K[0],fmax(C[1]*M[1]*K[1],C[2]*M[2]*K[2])); */ if (!(flags&FFT5D_NOMALLOC)) { snew_aligned(lin, lsize, 32); snew_aligned(lout, lsize, 32); if (nthreads > 1) { /* We need extra transpose buffers to avoid OpenMP barriers */ snew_aligned(lout2, lsize, 32); snew_aligned(lout3, lsize, 32); } else { /* We can reuse the buffers to avoid cache misses */ lout2 = lin; lout3 = lout; } } else { lin = *rlin; lout = *rlout; if (nthreads > 1) { lout2 = *rlout2; lout3 = *rlout3; } else { lout2 = lin; lout3 = lout; } } plan = (fft5d_plan)calloc(1, sizeof(struct fft5d_plan_t)); if (debug) { fprintf(debug, "Running on %d threads\n", nthreads); } #ifdef GMX_FFT_FFTW3 /*if not FFTW - then we don't do a 3d plan but instead use only 1D plans */ /* It is possible to use the 3d plan with OMP threads - but in that case it is not allowed to be called from * within a parallel region. For now deactivated. If it should be supported it has to made sure that * that the execute of the 3d plan is in a master/serial block (since it contains it own parallel region) * and that the 3d plan is faster than the 1d plan. */ if ((!(flags&FFT5D_INPLACE)) && (!(P[0] > 1 || P[1] > 1)) && nthreads == 1) /*don't do 3d plan in parallel or if in_place requested */ { int fftwflags = FFTW_DESTROY_INPUT; FFTW(iodim) dims[3]; int inNG = NG, outMG = MG, outKG = KG; FFTW_LOCK; if (!(flags&FFT5D_NOMEASURE)) { fftwflags |= FFTW_MEASURE; } if (flags&FFT5D_REALCOMPLEX) { if (!(flags&FFT5D_BACKWARD)) /*input pointer is not complex*/ { inNG *= 2; } else /*output pointer is not complex*/ { if (!(flags&FFT5D_ORDER_YZ)) { outMG *= 2; } else { outKG *= 2; } } } if (!(flags&FFT5D_BACKWARD)) { dims[0].n = KG; dims[1].n = MG; dims[2].n = rNG; dims[0].is = inNG*MG; /*N M K*/ dims[1].is = inNG; dims[2].is = 1; if (!(flags&FFT5D_ORDER_YZ)) { dims[0].os = MG; /*M K N*/ dims[1].os = 1; dims[2].os = MG*KG; } else { dims[0].os = 1; /*K N M*/ dims[1].os = KG*NG; dims[2].os = KG; } } else { if (!(flags&FFT5D_ORDER_YZ)) { dims[0].n = NG; dims[1].n = KG; dims[2].n = rMG; dims[0].is = 1; dims[1].is = NG*MG; dims[2].is = NG; dims[0].os = outMG*KG; dims[1].os = outMG; dims[2].os = 1; } else { dims[0].n = MG; dims[1].n = NG; dims[2].n = rKG; dims[0].is = NG; dims[1].is = 1; dims[2].is = NG*MG; dims[0].os = outKG*NG; dims[1].os = outKG; dims[2].os = 1; } } #ifdef FFT5D_THREADS #ifdef FFT5D_FFTW_THREADS FFTW(plan_with_nthreads)(nthreads); #endif #endif if ((flags&FFT5D_REALCOMPLEX) && !(flags&FFT5D_BACKWARD)) { plan->p3d = FFTW(plan_guru_dft_r2c)(/*rank*/ 3, dims, /*howmany*/ 0, /*howmany_dims*/ 0, (real*)lin, (FFTW(complex) *) lout, /*flags*/ fftwflags); } else if ((flags&FFT5D_REALCOMPLEX) && (flags&FFT5D_BACKWARD)) { plan->p3d = FFTW(plan_guru_dft_c2r)(/*rank*/ 3, dims, /*howmany*/ 0, /*howmany_dims*/ 0, (FFTW(complex) *) lin, (real*)lout, /*flags*/ fftwflags); } else { plan->p3d = FFTW(plan_guru_dft)(/*rank*/ 3, dims, /*howmany*/ 0, /*howmany_dims*/ 0, (FFTW(complex) *) lin, (FFTW(complex) *) lout, /*sign*/ (flags&FFT5D_BACKWARD) ? 
1 : -1, /*flags*/ fftwflags); } #ifdef FFT5D_THREADS #ifdef FFT5D_FFTW_THREADS FFTW(plan_with_nthreads)(1); #endif #endif FFTW_UNLOCK; } if (!plan->p3d) /* for decomposition and if 3d plan did not work */ { #endif /* GMX_FFT_FFTW3 */ for (s = 0; s < 3; s++) { if (debug) { fprintf(debug, "FFT5D: Plan s %d rC %d M %d pK %d C %d lsize %d\n", s, rC[s], M[s], pK[s], C[s], lsize); } plan->p1d[s] = (gmx_fft_t*)malloc(sizeof(gmx_fft_t)*nthreads); /* Make sure that the init routines are only called by one thread at a time and in order (later is only important to not confuse valgrind) */ #pragma omp parallel for num_threads(nthreads) schedule(static) ordered for (t = 0; t < nthreads; t++) { #pragma omp ordered { int tsize = ((t+1)*pM[s]*pK[s]/nthreads)-(t*pM[s]*pK[s]/nthreads); if ((flags&FFT5D_REALCOMPLEX) && ((!(flags&FFT5D_BACKWARD) && s == 0) || ((flags&FFT5D_BACKWARD) && s == 2))) { gmx_fft_init_many_1d_real( &plan->p1d[s][t], rC[s], tsize, (flags&FFT5D_NOMEASURE) ? GMX_FFT_FLAG_CONSERVATIVE : 0 ); } else { gmx_fft_init_many_1d ( &plan->p1d[s][t], C[s], tsize, (flags&FFT5D_NOMEASURE) ? GMX_FFT_FLAG_CONSERVATIVE : 0 ); } } } } #ifdef GMX_FFT_FFTW3 } #endif if ((flags&FFT5D_ORDER_YZ)) /*plan->cart is in the order of transposes */ { plan->cart[0] = comm[0]; plan->cart[1] = comm[1]; } else { plan->cart[1] = comm[0]; plan->cart[0] = comm[1]; } #ifdef FFT5D_MPI_TRANSPOSE FFTW_LOCK; for (s = 0; s < 2; s++) { if ((s == 0 && !(flags&FFT5D_ORDER_YZ)) || (s == 1 && (flags&FFT5D_ORDER_YZ))) { plan->mpip[s] = FFTW(mpi_plan_many_transpose)(nP[s], nP[s], N[s]*K[s]*pM[s]*2, 1, 1, (real*)lout2, (real*)lout3, plan->cart[s], FFTW_PATIENT); } else { plan->mpip[s] = FFTW(mpi_plan_many_transpose)(nP[s], nP[s], N[s]*pK[s]*M[s]*2, 1, 1, (real*)lout2, (real*)lout3, plan->cart[s], FFTW_PATIENT); } } FFTW_UNLOCK; #endif plan->lin = lin; plan->lout = lout; plan->lout2 = lout2; plan->lout3 = lout3; plan->NG = NG; plan->MG = MG; plan->KG = KG; for (s = 0; s < 3; s++) { plan->N[s] = N[s]; plan->M[s] = M[s]; plan->K[s] = K[s]; plan->pN[s] = pN[s]; plan->pM[s] = pM[s]; plan->pK[s] = pK[s]; plan->oM[s] = oM[s]; plan->oK[s] = oK[s]; plan->C[s] = C[s]; plan->rC[s] = rC[s]; plan->iNin[s] = iNin[s]; plan->oNin[s] = oNin[s]; plan->iNout[s] = iNout[s]; plan->oNout[s] = oNout[s]; } for (s = 0; s < 2; s++) { plan->P[s] = nP[s]; plan->coor[s] = prank[s]; } /* plan->fftorder=fftorder; plan->direction=direction; plan->realcomplex=realcomplex; */ plan->flags = flags; plan->nthreads = nthreads; *rlin = lin; *rlout = lout; *rlout2 = lout2; *rlout3 = lout3; return plan; } enum order { XYZ, XZY, YXZ, YZX, ZXY, ZYX }; /*here x,y,z and N,M,K is in rotated coordinate system!! x (and N) is mayor (consecutive) dimension, y (M) middle and z (K) major maxN,maxM,maxK is max size of local data pN, pM, pK is local size specific to current processor (only different to max if not divisible) NG, MG, KG is size of global data*/ static void splitaxes(t_complex* lout, const t_complex* lin, int maxN, int maxM, int maxK, int pN, int pM, int pK, int P, int NG, int *N, int* oN, int starty, int startz, int endy, int endz) { int x, y, z, i; int in_i, out_i, in_z, out_z, in_y, out_y; int s_y, e_y; for (z = startz; z < endz+1; z++) /*3. z l*/ { if (z == startz) { s_y = starty; } else { s_y = 0; } if (z == endz) { e_y = endy; } else { e_y = pM; } out_z = z*maxN*maxM; in_z = z*NG*pM; for (i = 0; i < P; i++) /*index cube along long axis*/ { out_i = out_z + i*maxN*maxM*maxK; in_i = in_z + oN[i]; for (y = s_y; y < e_y; y++) /*2. 
y k*/ { out_y = out_i + y*maxN; in_y = in_i + y*NG; for (x = 0; x < N[i]; x++) /*1. x j*/ { lout[out_y+x] = lin[in_y+x]; /*in=z*NG*pM+oN[i]+y*NG+x*/ /*after split important that each processor chunk i has size maxN*maxM*maxK and thus being the same size*/ /*before split data contiguos - thus if different processor get different amount oN is different*/ } } } } } /*make axis contiguous again (after AllToAll) and also do local transpose*/ /*transpose mayor and major dimension variables see above the major, middle, minor order is only correct for x,y,z (N,M,K) for the input N,M,K local dimensions KG global size*/ static void joinAxesTrans13(t_complex* lout, const t_complex* lin, int maxN, int maxM, int maxK, int pN, int pM, int pK, int P, int KG, int* K, int* oK, int starty, int startx, int endy, int endx) { int i, x, y, z; int out_i, in_i, out_x, in_x, out_z, in_z; int s_y, e_y; for (x = startx; x < endx+1; x++) /*1.j*/ { if (x == startx) { s_y = starty; } else { s_y = 0; } if (x == endx) { e_y = endy; } else { e_y = pM; } out_x = x*KG*pM; in_x = x; for (i = 0; i < P; i++) /*index cube along long axis*/ { out_i = out_x + oK[i]; in_i = in_x + i*maxM*maxN*maxK; for (z = 0; z < K[i]; z++) /*3.l*/ { out_z = out_i + z; in_z = in_i + z*maxM*maxN; for (y = s_y; y < e_y; y++) /*2.k*/ { lout[out_z+y*KG] = lin[in_z+y*maxN]; /*out=x*KG*pM+oK[i]+z+y*KG*/ } } } } } /*make axis contiguous again (after AllToAll) and also do local transpose tranpose mayor and middle dimension variables see above the minor, middle, major order is only correct for x,y,z (N,M,K) for the input N,M,K local size MG, global size*/ static void joinAxesTrans12(t_complex* lout, const t_complex* lin, int maxN, int maxM, int maxK, int pN, int pM, int pK, int P, int MG, int* M, int* oM, int startx, int startz, int endx, int endz) { int i, z, y, x; int out_i, in_i, out_z, in_z, out_x, in_x; int s_x, e_x; for (z = startz; z < endz+1; z++) { if (z == startz) { s_x = startx; } else { s_x = 0; } if (z == endz) { e_x = endx; } else { e_x = pN; } out_z = z*MG*pN; in_z = z*maxM*maxN; for (i = 0; i < P; i++) /*index cube along long axis*/ { out_i = out_z + oM[i]; in_i = in_z + i*maxM*maxN*maxK; for (x = s_x; x < e_x; x++) { out_x = out_i + x*MG; in_x = in_i + x; for (y = 0; y < M[i]; y++) { lout[out_x+y] = lin[in_x+y*maxN]; /*out=z*MG*pN+oM[i]+x*MG+y*/ } } } } } static void rotate_offsets(int x[]) { int t = x[0]; /* x[0]=x[2]; x[2]=x[1]; x[1]=t;*/ x[0] = x[1]; x[1] = x[2]; x[2] = t; } /*compute the offset to compare or print transposed local data in original input coordinates xs matrix dimension size, xl dimension length, xc decomposition offset s: step in computation = number of transposes*/ static void compute_offsets(fft5d_plan plan, int xs[], int xl[], int xc[], int NG[], int s) { /* int direction = plan->direction; int fftorder = plan->fftorder;*/ int o; int pos[3], i; int *pM = plan->pM, *pK = plan->pK, *oM = plan->oM, *oK = plan->oK, *C = plan->C, *rC = plan->rC; NG[0] = plan->NG; NG[1] = plan->MG; NG[2] = plan->KG; if (!(plan->flags&FFT5D_ORDER_YZ)) { switch (s) { case 0: o = XYZ; break; case 1: o = ZYX; break; case 2: o = YZX; break; default: assert(0); } } else { switch (s) { case 0: o = XYZ; break; case 1: o = YXZ; break; case 2: o = ZXY; break; default: assert(0); } } switch (o) { case XYZ: pos[0] = 1; pos[1] = 2; pos[2] = 3; break; case XZY: pos[0] = 1; pos[1] = 3; pos[2] = 2; break; case YXZ: pos[0] = 2; pos[1] = 1; pos[2] = 3; break; case YZX: pos[0] = 3; pos[1] = 1; pos[2] = 2; break; case ZXY: pos[0] = 2; pos[1] = 3; pos[2] = 1; 
break; case ZYX: pos[0] = 3; pos[1] = 2; pos[2] = 1; break; } /*if (debug) printf("pos: %d %d %d\n",pos[0],pos[1],pos[2]);*/ /*xs, xl give dimension size and data length in local transposed coordinate system for 0(/1/2): x(/y/z) in original coordinate system*/ for (i = 0; i < 3; i++) { switch (pos[i]) { case 1: xs[i] = 1; xc[i] = 0; xl[i] = C[s]; break; case 2: xs[i] = C[s]; xc[i] = oM[s]; xl[i] = pM[s]; break; case 3: xs[i] = C[s]*pM[s]; xc[i] = oK[s]; xl[i] = pK[s]; break; } } /*input order is different for test program to match FFTW order (important for complex to real)*/ if (plan->flags&FFT5D_BACKWARD) { rotate_offsets(xs); rotate_offsets(xl); rotate_offsets(xc); rotate_offsets(NG); if (plan->flags&FFT5D_ORDER_YZ) { rotate_offsets(xs); rotate_offsets(xl); rotate_offsets(xc); rotate_offsets(NG); } } if ((plan->flags&FFT5D_REALCOMPLEX) && ((!(plan->flags&FFT5D_BACKWARD) && s == 0) || ((plan->flags&FFT5D_BACKWARD) && s == 2))) { xl[0] = rC[s]; } } static void print_localdata(const t_complex* lin, const char* txt, int s, fft5d_plan plan) { int x, y, z, l, t; int *coor = plan->coor; int xs[3], xl[3], xc[3], NG[3]; int ll = (plan->flags&FFT5D_REALCOMPLEX) ? 1 : 2; compute_offsets(plan, xs, xl, xc, NG, s); fprintf(debug, txt, coor[0], coor[1], s); /*printf("xs: %d %d %d, xl: %d %d %d\n",xs[0],xs[1],xs[2],xl[0],xl[1],xl[2]);*/ for (z = 0; z < xl[2]; z++) { for (y = 0; y < xl[1]; y++) { fprintf(debug, "%d %d: ", coor[0], coor[1]); for (x = 0; x < xl[0]; x++) { for (l = 0; l < ll; l++) { fprintf(debug, "%f ", ((real*)lin)[(z*xs[2]+y*xs[1])*2+(x*xs[0])*ll+l]); } fprintf(debug, ","); } fprintf(debug, "\n"); } } } void fft5d_execute(fft5d_plan plan, int thread, fft5d_time times) { t_complex *lin = plan->lin; t_complex *lout = plan->lout; t_complex *lout2 = plan->lout2; t_complex *lout3 = plan->lout3; t_complex *fftout, *joinin; gmx_fft_t **p1d = plan->p1d; #ifdef FFT5D_MPI_TRANSPOSE FFTW(plan) *mpip = plan->mpip; #endif #ifdef GMX_MPI MPI_Comm *cart = plan->cart; #endif double time_fft = 0, time_local = 0, time_mpi[2] = {0}, time = 0; int *N = plan->N, *M = plan->M, *K = plan->K, *pN = plan->pN, *pM = plan->pM, *pK = plan->pK, *C = plan->C, *P = plan->P, **iNin = plan->iNin, **oNin = plan->oNin, **iNout = plan->iNout, **oNout = plan->oNout; int s = 0, tstart, tend, bParallelDim; #ifdef GMX_FFT_FFTW3 if (plan->p3d) { if (thread == 0) { #ifdef NOGMX if (times != 0) { time = MPI_Wtime(); } #endif FFTW(execute)(plan->p3d); #ifdef NOGMX if (times != 0) { times->fft += MPI_Wtime()-time; } #endif } return; } #endif s = 0; /*lin: x,y,z*/ if (plan->flags&FFT5D_DEBUG && thread == 0) { print_localdata(lin, "%d %d: copy in lin\n", s, plan); } for (s = 0; s < 2; s++) /*loop over first two FFT steps (corner rotations)*/ { #ifdef GMX_MPI if (GMX_PARALLEL_ENV_INITIALIZED && cart[s] != MPI_COMM_NULL && P[s] > 1) { bParallelDim = 1; } else #endif { bParallelDim = 0; } /* ---------- START FFT ------------ */ #ifdef NOGMX if (times != 0 && thread == 0) { time = MPI_Wtime(); } #endif if (bParallelDim || plan->nthreads == 1) { fftout = lout; } else { if (s == 0) { fftout = lout3; } else { fftout = lout2; } } tstart = (thread*pM[s]*pK[s]/plan->nthreads)*C[s]; if ((plan->flags&FFT5D_REALCOMPLEX) && !(plan->flags&FFT5D_BACKWARD) && s == 0) { gmx_fft_many_1d_real(p1d[s][thread], (plan->flags&FFT5D_BACKWARD) ? GMX_FFT_COMPLEX_TO_REAL : GMX_FFT_REAL_TO_COMPLEX, lin+tstart, fftout+tstart); } else { gmx_fft_many_1d( p1d[s][thread], (plan->flags&FFT5D_BACKWARD) ? 
GMX_FFT_BACKWARD : GMX_FFT_FORWARD, lin+tstart, fftout+tstart); } #ifdef NOGMX if (times != NULL && thread == 0) { time_fft += MPI_Wtime()-time; } #endif if (plan->flags&FFT5D_DEBUG && thread == 0) { print_localdata(lout, "%d %d: FFT %d\n", s, plan); } /* ---------- END FFT ------------ */ /* ---------- START SPLIT + TRANSPOSE------------ (if parallel in in this dimension)*/ if (bParallelDim) { #ifdef NOGMX if (times != NULL && thread == 0) { time = MPI_Wtime(); } #endif /*prepare for A llToAll 1. (most outer) axes (x) is split into P[s] parts of size N[s] for sending*/ if (pM[s] > 0) { tend = ((thread+1)*pM[s]*pK[s]/plan->nthreads); tstart /= C[s]; splitaxes(lout2, lout, N[s], M[s], K[s], pN[s], pM[s], pK[s], P[s], C[s], iNout[s], oNout[s], tstart%pM[s], tstart/pM[s], tend%pM[s], tend/pM[s]); } #pragma omp barrier /*barrier required before AllToAll (all input has to be their) - before timing to make timing more acurate*/ #ifdef NOGMX if (times != NULL && thread == 0) { time_local += MPI_Wtime()-time; } #endif /* ---------- END SPLIT , START TRANSPOSE------------ */ if (thread == 0) { #ifdef NOGMX if (times != 0) { time = MPI_Wtime(); } #else wallcycle_start(times, ewcPME_FFTCOMM); #endif #ifdef FFT5D_MPI_TRANSPOSE FFTW(execute)(mpip[s]); #else #ifdef GMX_MPI if ((s == 0 && !(plan->flags&FFT5D_ORDER_YZ)) || (s == 1 && (plan->flags&FFT5D_ORDER_YZ))) { MPI_Alltoall((real *)lout2, N[s]*pM[s]*K[s]*sizeof(t_complex)/sizeof(real), GMX_MPI_REAL, (real *)lout3, N[s]*pM[s]*K[s]*sizeof(t_complex)/sizeof(real), GMX_MPI_REAL, cart[s]); } else { MPI_Alltoall((real *)lout2, N[s]*M[s]*pK[s]*sizeof(t_complex)/sizeof(real), GMX_MPI_REAL, (real *)lout3, N[s]*M[s]*pK[s]*sizeof(t_complex)/sizeof(real), GMX_MPI_REAL, cart[s]); } #else gmx_incons("fft5d MPI call without MPI configuration"); #endif /*GMX_MPI*/ #endif /*FFT5D_MPI_TRANSPOSE*/ #ifdef NOGMX if (times != 0) { time_mpi[s] = MPI_Wtime()-time; } #else wallcycle_stop(times, ewcPME_FFTCOMM); #endif } /*master*/ } /* bPrallelDim */ #pragma omp barrier /*both needed for parallel and non-parallel dimension (either have to wait on data from AlltoAll or from last FFT*/ /* ---------- END SPLIT + TRANSPOSE------------ */ /* ---------- START JOIN ------------ */ #ifdef NOGMX if (times != NULL && thread == 0) { time = MPI_Wtime(); } #endif if (bParallelDim) { joinin = lout3; } else { joinin = fftout; } /*bring back in matrix form thus make new 1. 
axes contiguos also local transpose 1 and 2/3 runs on thread used for following FFT (thus needing a barrier before but not afterwards) */ if ((s == 0 && !(plan->flags&FFT5D_ORDER_YZ)) || (s == 1 && (plan->flags&FFT5D_ORDER_YZ))) { if (pM[s] > 0) { tstart = ( thread *pM[s]*pN[s]/plan->nthreads); tend = ((thread+1)*pM[s]*pN[s]/plan->nthreads); joinAxesTrans13(lin, joinin, N[s], pM[s], K[s], pN[s], pM[s], pK[s], P[s], C[s+1], iNin[s+1], oNin[s+1], tstart%pM[s], tstart/pM[s], tend%pM[s], tend/pM[s]); } } else { if (pN[s] > 0) { tstart = ( thread *pK[s]*pN[s]/plan->nthreads); tend = ((thread+1)*pK[s]*pN[s]/plan->nthreads); joinAxesTrans12(lin, joinin, N[s], M[s], pK[s], pN[s], pM[s], pK[s], P[s], C[s+1], iNin[s+1], oNin[s+1], tstart%pN[s], tstart/pN[s], tend%pN[s], tend/pN[s]); } } #ifdef NOGMX if (times != NULL && thread == 0) { time_local += MPI_Wtime()-time; } #endif if (plan->flags&FFT5D_DEBUG && thread == 0) { print_localdata(lin, "%d %d: tranposed %d\n", s+1, plan); } /* ---------- END JOIN ------------ */ /*if (debug) print_localdata(lin, "%d %d: transposed x-z\n", N1, M0, K, ZYX, coor);*/ } /* for(s=0;s<2;s++) */ #ifdef NOGMX if (times != NULL && thread == 0) { time = MPI_Wtime(); } #endif if (plan->flags&FFT5D_INPLACE) { lout = lin; /*in place currently not supported*/ } /* ----------- FFT ----------- */ tstart = (thread*pM[s]*pK[s]/plan->nthreads)*C[s]; if ((plan->flags&FFT5D_REALCOMPLEX) && (plan->flags&FFT5D_BACKWARD)) { gmx_fft_many_1d_real(p1d[s][thread], (plan->flags&FFT5D_BACKWARD) ? GMX_FFT_COMPLEX_TO_REAL : GMX_FFT_REAL_TO_COMPLEX, lin+tstart, lout+tstart); } else { gmx_fft_many_1d( p1d[s][thread], (plan->flags&FFT5D_BACKWARD) ? GMX_FFT_BACKWARD : GMX_FFT_FORWARD, lin+tstart, lout+tstart); } /* ------------ END FFT ---------*/ #ifdef NOGMX if (times != NULL && thread == 0) { time_fft += MPI_Wtime()-time; times->fft += time_fft; times->local += time_local; times->mpi2 += time_mpi[1]; times->mpi1 += time_mpi[0]; } #endif if (plan->flags&FFT5D_DEBUG && thread == 0) { print_localdata(lout, "%d %d: FFT %d\n", s, plan); } } void fft5d_destroy(fft5d_plan plan) { int s, t; for (s = 0; s < 3; s++) { if (plan->p1d[s]) { for (t = 0; t < plan->nthreads; t++) { gmx_many_fft_destroy(plan->p1d[s][t]); } free(plan->p1d[s]); } if (plan->iNin[s]) { free(plan->iNin[s]); plan->iNin[s] = 0; } if (plan->oNin[s]) { free(plan->oNin[s]); plan->oNin[s] = 0; } if (plan->iNout[s]) { free(plan->iNout[s]); plan->iNout[s] = 0; } if (plan->oNout[s]) { free(plan->oNout[s]); plan->oNout[s] = 0; } } #ifdef GMX_FFT_FFTW3 FFTW_LOCK; #ifdef FFT5D_MPI_TRANSPOS for (s = 0; s < 2; s++) { FFTW(destroy_plan)(plan->mpip[s]); } if (plan->p3d) { FFTW(destroy_plan)(plan->p3d); } #endif /* FFT5D_MPI_TRANSPOS */ #endif /* GMX_FFT_FFTW3 */ if (!(plan->flags&FFT5D_NOMALLOC)) { sfree_aligned(plan->lin); sfree_aligned(plan->lout); if (plan->nthreads > 1) { sfree_aligned(plan->lout2); sfree_aligned(plan->lout3); } } #ifdef FFT5D_THREADS #ifdef FFT5D_FFTW_THREADS /*FFTW(cleanup_threads)();*/ #endif #endif free(plan); } /*Is this better than direct access of plan? enough data? 
here 0,1 reference divided by which processor grid dimension (not FFT step!)*/ void fft5d_local_size(fft5d_plan plan, int* N1, int* M0, int* K0, int* K1, int** coor) { *N1 = plan->N[0]; *M0 = plan->M[0]; *K1 = plan->K[0]; *K0 = plan->N[1]; *coor = plan->coor; } /*same as fft5d_plan_3d but with cartesian coordinator and automatic splitting of processor dimensions*/ fft5d_plan fft5d_plan_3d_cart(int NG, int MG, int KG, MPI_Comm comm, int P0, int flags, t_complex** rlin, t_complex** rlout, t_complex** rlout2, t_complex** rlout3, int nthreads) { MPI_Comm cart[2] = {0}; #ifdef GMX_MPI int size = 1, prank = 0; int P[2]; int coor[2]; int wrap[] = {0, 0}; MPI_Comm gcart; int rdim1[] = {0, 1}, rdim2[] = {1, 0}; MPI_Comm_size(comm, &size); MPI_Comm_rank(comm, &prank); if (P0 == 0) { P0 = lfactor(size); } if (size%P0 != 0) { if (prank == 0) { printf("FFT5D: WARNING: Number of processors %d not evenly dividable by %d\n", size, P0); } P0 = lfactor(size); } P[0] = P0; P[1] = size/P0; /*number of processors in the two dimensions*/ /*Difference between x-y-z regarding 2d decomposition is whether they are distributed along axis 1, 2 or both*/ MPI_Cart_create(comm, 2, P, wrap, 1, &gcart); /*parameter 4: value 1: reorder*/ MPI_Cart_get(gcart, 2, P, wrap, coor); MPI_Cart_sub(gcart, rdim1, &cart[0]); MPI_Cart_sub(gcart, rdim2, &cart[1]); #endif return fft5d_plan_3d(NG, MG, KG, cart, flags, rlin, rlout, rlout2, rlout3, nthreads); } /*prints in original coordinate system of data (as the input to FFT)*/ void fft5d_compare_data(const t_complex* lin, const t_complex* in, fft5d_plan plan, int bothLocal, int normalize) { int xs[3], xl[3], xc[3], NG[3]; int x, y, z, l; int *coor = plan->coor; int ll = 2; /*compare ll values per element (has to be 2 for complex)*/ if ((plan->flags&FFT5D_REALCOMPLEX) && (plan->flags&FFT5D_BACKWARD)) { ll = 1; } compute_offsets(plan, xs, xl, xc, NG, 2); if (plan->flags&FFT5D_DEBUG) { printf("Compare2\n"); } for (z = 0; z < xl[2]; z++) { for (y = 0; y < xl[1]; y++) { if (plan->flags&FFT5D_DEBUG) { printf("%d %d: ", coor[0], coor[1]); } for (x = 0; x < xl[0]; x++) { for (l = 0; l < ll; l++) /*loop over real/complex parts*/ { real a, b; a = ((real*)lin)[(z*xs[2]+y*xs[1])*2+x*xs[0]*ll+l]; if (normalize) { a /= plan->rC[0]*plan->rC[1]*plan->rC[2]; } if (!bothLocal) { b = ((real*)in)[((z+xc[2])*NG[0]*NG[1]+(y+xc[1])*NG[0])*2+(x+xc[0])*ll+l]; } else { b = ((real*)in)[(z*xs[2]+y*xs[1])*2+x*xs[0]*ll+l]; } if (plan->flags&FFT5D_DEBUG) { printf("%f %f, ", a, b); } else { if (fabs(a-b) > 2*NG[0]*NG[1]*NG[2]*GMX_REAL_EPS) { printf("result incorrect on %d,%d at %d,%d,%d: FFT5D:%f reference:%f\n", coor[0], coor[1], x, y, z, a, b); } /* assert(fabs(a-b)<2*NG[0]*NG[1]*NG[2]*GMX_REAL_EPS);*/ } } if (plan->flags&FFT5D_DEBUG) { printf(","); } } if (plan->flags&FFT5D_DEBUG) { printf("\n"); } } } }
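A detail worth isolating from fft5d_plan_3d above is the EVENDIST slab decomposition: rank i of P owns rows [(NG*i)/P, (NG*(i+1))/P), which keeps all per-rank sizes within one row of each other even when NG is not divisible by P, whereas the ceil-based variant in the #ifndef branch can leave the last rank with almost nothing. A minimal sketch of the offset/size computation:

#include <cstdio>
#include <vector>

int main()
{
    const int NG = 10, P = 4;   // global size and number of ranks (illustrative)
    std::vector<int> offset(P), count(P);
    for (int i = 0; i < P; ++i)
        offset[i] = (NG * i) / P;               // oN0[i] in fft5d_plan_3d above
    for (int i = 0; i < P - 1; ++i)
        count[i] = offset[i + 1] - offset[i];   // N0[i]
    count[P - 1] = NG - offset[P - 1];          // last rank takes the remainder
    for (int i = 0; i < P; ++i)
        printf("rank %d: offset %d, count %d\n", i, offset[i], count[i]);
    // rank 0: offset 0, count 2; rank 1: offset 2, count 3;
    // rank 2: offset 5, count 2; rank 3: offset 7, count 3
    return 0;
}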
fill_nr_s8.c
/* Copyright 2014-2018 The PySCF Developers. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. * * Author: Qiming Sun <osirpt.sun@gmail.com> */ #include <stdlib.h> #include <string.h> #include <math.h> //#include <omp.h> #include "config.h" #include "cint.h" #include "cvhf.h" #include "nr_direct.h" #include "optimizer.h" #define MAX(I,J) ((I) > (J) ? (I) : (J)) int GTOmax_shell_dim(int *ao_loc, int *shls, int ncenter); int GTOmax_cache_size(int (*intor)(), int *shls_slice, int ncenter, int *atm, int natm, int *bas, int nbas, double *env); void int2e_optimizer(CINTOpt **opt, int *atm, int natm, int *bas, int nbas, double *env); /* * 8-fold symmetry, k>=l, k>=i>=j, */ static void fillnr_s8(int (*intor)(), int (*fprescreen)(), double *eri, int ish, int jsh, CVHFOpt *vhfopt, IntorEnvs *envs) { const int *atm = envs->atm; const int *bas = envs->bas; const double *env = envs->env; const int natm = envs->natm; const int nbas = envs->nbas; const int *ao_loc = envs->ao_loc; const CINTOpt *cintopt = envs->cintopt; const int nao = ao_loc[nbas]; const size_t nao2 = nao * nao; const int di = ao_loc[ish+1] - ao_loc[ish]; const int dj = ao_loc[jsh+1] - ao_loc[jsh]; double *cache = eri + di * dj * nao2; int dims[4] = {nao, nao, dj, di}; int ksh, lsh, ij, k, l; int shls[4]; double *peri; shls[2] = jsh; shls[3] = ish; for (ksh = 0; ksh <= ish; ksh++) { for (lsh = 0; lsh <= ksh; lsh++) { shls[0] = lsh; shls[1] = ksh; peri = eri + ao_loc[ksh] * nao + ao_loc[lsh]; if ((*fprescreen)(shls, vhfopt, atm, bas, env)) { (*intor)(peri, dims, shls, atm, natm, bas, nbas, env, cintopt, cache); } else { for (ij = 0; ij < di*dj; ij++) { for (k = 0; k < ao_loc[ksh+1]-ao_loc[ksh]; k++) { for (l = 0; l < ao_loc[lsh+1]-ao_loc[lsh]; l++) { peri[k*nao+l] = 0; } } peri += nao2; } } } } } static void store_ij(int (*intor)(), double *eri, double *buf, int ish, int jsh, CVHFOpt *vhfopt, IntorEnvs *envs) { const int nbas = envs->nbas; const int *ao_loc = envs->ao_loc; const int nao = ao_loc[nbas]; const size_t nao2 = nao * nao; const int di = ao_loc[ish+1] - ao_loc[ish]; const int dj = ao_loc[jsh+1] - ao_loc[jsh]; int i, j, k, l, i0, j0, kl; size_t ij0; double *peri, *pbuf; fillnr_s8(intor, vhfopt->fprescreen, buf, ish, jsh, vhfopt, envs); for (i0 = ao_loc[ish], i = 0; i < di; i++, i0++) { for (j0 = ao_loc[jsh], j = 0; j < dj; j++, j0++) { if (i0 >= j0) { ij0 = i0*(i0+1)/2 + j0; peri = eri + ij0*(ij0+1)/2; pbuf = buf + nao2 * (i*dj+j); for (kl = 0, k = 0; k < i0; k++) { for (l = 0; l <= k; l++, kl++) { peri[kl] = pbuf[k*nao+l]; } } // k == i0 for (l = 0; l <= j0; l++, kl++) { peri[kl] = pbuf[k*nao+l]; } } } } } void GTO2e_cart_or_sph(int (*intor)(), CINTOpt *cintopt, double *eri, int *ao_loc, int *atm, int natm, int *bas, int nbas, double *env) { const int nao = ao_loc[nbas]; IntorEnvs envs = {natm, nbas, atm, bas, env, NULL, ao_loc, NULL, cintopt, 1}; CVHFOpt *vhfopt; CVHFnr_optimizer(&vhfopt, intor, cintopt, ao_loc, atm, natm, bas, nbas, env); vhfopt->fprescreen = CVHFnr_schwarz_cond; int shls_slice[] = {0, nbas}; const int di = 
GTOmax_shell_dim(ao_loc, shls_slice, 1); const int cache_size = GTOmax_cache_size(intor, shls_slice, 1, atm, natm, bas, nbas, env); #pragma omp parallel { int i, j, ij; double *buf = malloc(sizeof(double) * (di*di*nao*nao + cache_size)); #pragma omp for nowait schedule(dynamic, 2) for (ij = 0; ij < nbas*(nbas+1)/2; ij++) { i = (int)(sqrt(2*ij+.25) - .5 + 1e-7); j = ij - (i*(i+1)/2); store_ij(intor, eri, buf, i, j, vhfopt, &envs); } free(buf); } CVHFdel_optimizer(&vhfopt); }
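The OpenMP loop in GTO2e_cart_or_sph above flattens the triangle of shell pairs (i >= j) into a single index ij = i*(i+1)/2 + j so that schedule(dynamic, 2) can balance the very uneven per-pair costs; (i, j) is then recovered from ij with the closed-form root of i*(i+1)/2 <= ij. A sketch verifying that the inverse mapping round-trips:

#include <cstdio>
#include <cmath>

int main()
{
    const int nbas = 6;   // illustrative shell count
    for (int ij = 0; ij < nbas * (nbas + 1) / 2; ++ij) {
        // Same inversion as in the pragma omp for loop above; the 1e-7 guards
        // against the square root landing epsilon below an exact integer.
        int i = (int)(sqrt(2 * ij + 0.25) - 0.5 + 1e-7);
        int j = ij - i * (i + 1) / 2;
        // Round-trip check: re-flatten and confirm j stays in the triangle.
        if (i * (i + 1) / 2 + j != ij || j > i || j < 0)
            printf("mapping broken at ij=%d\n", ij);
    }
    printf("all %d pair indices round-trip\n", nbas * (nbas + 1) / 2);
    return 0;
}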
Tutorial.h
//================================================================================================= /*! // \file blaze/Tutorial.h // \brief Tutorial of the Blaze library // // Copyright (C) 2013 Klaus Iglberger - All Rights Reserved // // This file is part of the Blaze library. You can redistribute it and/or modify it under // the terms of the New (Revised) BSD License. Redistribution and use in source and binary // forms, with or without modification, are permitted provided that the following conditions // are met: // // 1. Redistributions of source code must retain the above copyright notice, this list of // conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, this list // of conditions and the following disclaimer in the documentation and/or other materials // provided with the distribution. // 3. Neither the names of the Blaze development group nor the names of its contributors // may be used to endorse or promote products derived from this software without specific // prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES // OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT // SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED // TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR // BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN // ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH // DAMAGE. */ //================================================================================================= #ifndef _BLAZE_TUTORIAL_H_ #define _BLAZE_TUTORIAL_H_ //================================================================================================= // // BLAZE TUTORIAL // //================================================================================================= //**Mainpage*************************************************************************************** /*!\mainpage // // \image html blaze300x150.jpg // // This is the API for the \b Blaze high performance C++ math library. It gives a complete // overview of the individual features and sublibraries of \b Blaze. To get a first impression // on \b Blaze, the short \ref getting_started tutorial is a good place to start. Afterwards, // the following long tutorial covers the most important aspects of the \b Blaze math library. 
// The tabs at the top of the page allow a direct access to the individual modules, namespaces, // classes, and files of the \b Blaze library.\n\n // // \section table_of_content Table of Contents // // <ul> // <li> \ref configuration_and_installation </li> // <li> \ref getting_started </li> // <li> \ref vectors // <ul> // <li> \ref vector_types </li> // <li> \ref vector_operations </li> // </ul> // </li> // <li> \ref matrices // <ul> // <li> \ref matrix_types </li> // <li> \ref matrix_operations </li> // </ul> // </li> // <li> \ref adaptors // <ul> // <li> \ref adaptors_symmetric_matrices </li> // <li> \ref adaptors_hermitian_matrices </li> // <li> \ref adaptors_triangular_matrices </li> // </ul> // </li> // <li> \ref views // <ul> // <li> \ref views_subvectors </li> // <li> \ref views_submatrices </li> // <li> \ref views_rows </li> // <li> \ref views_columns </li> // </ul> // </li> // <li> \ref arithmetic_operations // <ul> // <li> \ref addition </li> // <li> \ref subtraction </li> // <li> \ref scalar_multiplication </li> // <li> \ref vector_vector_multiplication // <ul> // <li> \ref componentwise_multiplication </li> // <li> \ref inner_product </li> // <li> \ref outer_product </li> // <li> \ref cross_product </li> // </ul> // </li> // <li> \ref vector_vector_division </li> // <li> \ref matrix_vector_multiplication </li> // <li> \ref matrix_matrix_multiplication </li> // </ul> // </li> // <li> \ref custom_operations </li> // <li> \ref shared_memory_parallelization // <ul> // <li> \ref openmp_parallelization </li> // <li> \ref cpp_threads_parallelization </li> // <li> \ref boost_threads_parallelization </li> // <li> \ref serial_execution </li> // </ul> // </li> // <li> \ref serialization // <ul> // <li> \ref vector_serialization </li> // <li> \ref matrix_serialization </li> // </ul> // </li> // <li> \ref blas_functions </li> // <li> \ref lapack_functions </li> // <li> \ref configuration_files </li> // <li> \ref block_vectors_and_matrices </li> // <li> \ref custom_data_types </li> // <li> \ref error_reporting_customization </li> // <li> \ref intra_statement_optimization </li> // </ul> */ //************************************************************************************************* //**Configuration and Installation***************************************************************** /*!\page configuration_and_installation Configuration and Installation // // Since \b Blaze is a header-only library, setting up the \b Blaze library on a particular system // is a fairly easy two step process. In the following, this two step process is explained in // detail, preceded only by a short summary of the requirements. // // // \n \section requirements Requirements // <hr> // // In order for \b Blaze to work properly, the Boost library must be installed on the system. It // is recommended to use the newest Boost library available, but \b Blaze requires at minimum the // Boost version 1.54.0. If you don't have Boost installed on your system, you can download it for // free from 'http://www.boost.org'. // // Additionally, for maximum performance \b Blaze expects you to have a BLAS library installed // (<a href="http://software.intel.com/en-us/articles/intel-mkl/">Intel MKL</a>, // <a href="http://developer.amd.com/libraries/acml/">ACML</a>, // <a href="http://math-atlas.sourceforge.net">Atlas</a>, // <a href="http://www.tacc.utexas.edu/tacc-projects/gotoblas2">Goto</a>, ...). 
If you don't // have a BLAS library installed on your system, \b Blaze will still work and will not be reduced // in functionality, but performance may be limited. Thus it is strongly recommended to install a // BLAS library. // // Furthermore, for computing the determinant of a dense matrix and for the dense matrix inversion // \b Blaze requires <a href="https://en.wikipedia.org/wiki/LAPACK">LAPACK</a>. When either of // these features is used it is necessary to link the LAPACK library to the final executable. If // no LAPACK library is available the use of these features will result in a linker error. // // // \n \section step_1_installation Step 1: Installation // <hr> // // \subsection step_1_installation_unix Linux/MacOSX User // // The first step is the installation of the header files. Since \b Blaze only consists of header // files, the <tt>./blaze</tt> subdirectory can be simply copied to a standard include directory // (note that this requires root privileges): \code cp -r ./blaze /usr/local/include \endcode // Alternatively, on Unix-based machines (which includes Linux and Mac OS X) the // \c CPLUS_INCLUDE_PATH environment variable can be set. The specified directory will be // searched after any directories specified on the command line with the option \c -I and // before the standard default directories (such as \c /usr/local/include and \c /usr/include). // Assuming a user named 'Jon', the environment variable can be set as follows: \code CPLUS_INCLUDE_PATH=/usr/home/jon/blaze export CPLUS_INCLUDE_PATH \endcode // Last but not least, the <tt>./blaze</tt> subdirectory can be explicitly specified on the // command line. The following example demonstrates this by means of the GNU C++ compiler: \code g++ -I/usr/home/jon/blaze -o BlazeTest BlazeTest.cpp \endcode // \n \subsection step_1_installation_windows Windows User // // Windows doesn't have a standard include directory. Therefore the \b Blaze header files can be // copied to any other directory or simply left in the default \b Blaze directory. However, the // chosen include directory has to be explicitly specified as include path. In Visual Studio, // this is done via the project property pages, configuration properties, C/C++, General settings. // Here the additional include directories can be specified. // // // \n \section step_2_configuration Step 2: Configuration // <hr> // // The second step is the configuration and customization of the \b Blaze library. Many aspects of // \b Blaze can be adapted to specific requirements, environments and architectures by customizing // the header files in the <tt>./blaze/config/</tt> subdirectory. Since the default settings are // reasonable for most systems this step can also be skipped. However, in order to achieve maximum // performance a customization of at least the following configuration files is required: // // - <b><tt>./blaze/config/BLAS.h</tt></b>: Via this configuration file \b Blaze can be enabled // to use a third-party BLAS library for several basic linear algebra functions (such as for // instance dense matrix multiplications). In case no BLAS library is used, all linear algebra // functions use the default implementations of the \b Blaze library and therefore BLAS is not a // requirement for the compilation process. However, please note that performance may be limited. // - <b><tt>./blaze/config/CacheSize.h</tt></b>: This file contains the hardware specific cache // settings. \b Blaze uses this information to optimize its cache usage. 
For maximum performance // it is recommended to adapt these settings to a specific target architecture. // - <b><tt>./blaze/config/Thresholds.h</tt></b>: This file contains all thresholds for the // customization of the \b Blaze compute kernels. In order to tune the kernels for a specific // architecture and to maximize performance it can be necessary to adjust the thresholds, // especially for a parallel execution (see \ref shared_memory_parallelization). // // For an overview of other customization options and more details, please see the section // \ref configuration_files. // // \n Next: \ref getting_started */ //************************************************************************************************* //**Getting Started******************************************************************************** /*!\page getting_started Getting Started // // This short tutorial serves the purpose to give a quick overview of the way mathematical // expressions have to be formulated in \b Blaze. Starting with \ref vector_types, the following // long tutorial covers the most important aspects of the \b Blaze math library. // // // \n \section getting_started_vector_example A First Example // // \b Blaze is written such that using mathematical expressions is as close to mathematical // textbooks as possible and therefore as intuitive as possible. In nearly all cases the seemingly // easiest solution is the right solution and most users experience no problems when trying to // use \b Blaze in the most natural way. The following example gives a first impression of the // formulation of a vector addition in \b Blaze: \code #include <iostream> #include <blaze/Math.h> using blaze::StaticVector; using blaze::DynamicVector; // Instantiation of a static 3D column vector. The vector is directly initialized as // ( 4 -2 5 ) StaticVector<int,3UL> a{ 4, -2, 5 }; // Instantiation of a dynamic 3D column vector. Via the subscript operator the values are set to // ( 2 5 -3 ) DynamicVector<int> b( 3UL ); b[0] = 2; b[1] = 5; b[2] = -3; // Adding the vectors a and b DynamicVector<int> c = a + b; // Printing the result of the vector addition std::cout << "c =\n" << c << "\n"; \endcode // Note that the entire \b Blaze math library can be included via the \c blaze/Math.h header // file. Alternatively, the entire \b Blaze library, including both the math and the entire // utility module, can be included via the \c blaze/Blaze.h header file. Also note that all // classes and functions of \b Blaze are contained in the blaze namespace.\n\n // // Assuming that this program resides in a source file called \c FirstExample.cpp, it can be // compiled for instance via the GNU C++ compiler: \code g++ -ansi -O3 -DNDEBUG -mavx -o FirstExample FirstExample.cpp \endcode // Note the definition of the \c NDEBUG preprocessor symbol. In order to achieve maximum // performance, it is necessary to compile the program in release mode, which deactivates // all debugging functionality inside \b Blaze. It is also strongly recommended to specify // the available architecture specific instruction set (as for instance the AVX instruction // set, which if available can be activated via the \c -mavx flag). 
This allows \b Blaze // to optimize computations via vectorization.\n\n // // When running the resulting executable \c FirstExample, the output of the last line of // this small program is \code c = 6 3 2 \endcode // \n \section getting_started_matrix_example An Example Involving Matrices // // Similarly easy and intuitive are expressions involving matrices: \code #include <blaze/Math.h> using namespace blaze; // Instantiating a dynamic 3D column vector DynamicVector<int> x{ 4, -1, 3 }; // Instantiating a dynamic 2x3 row-major matrix, preinitialized with 0. Via the function call // operator three values of the matrix are explicitly set to get the matrix // ( 1 0 4 ) // ( 0 -2 0 ) DynamicMatrix<int> A( 2UL, 3UL, 0 ); A(0,0) = 1; A(0,2) = 4; A(1,1) = -2; // Performing a matrix/vector multiplication DynamicVector<int> y = A * x; // Printing the resulting vector std::cout << "y =\n" << y << "\n"; // Instantiating a static column-major matrix. The matrix is directly initialized as // ( 3 -1 ) // ( 0 2 ) // ( -1 0 ) StaticMatrix<int,3UL,2UL,columnMajor> B{ { 3, -1 }, { 0, 2 }, { -1, 0 } }; // Performing a matrix/matrix multiplication DynamicMatrix<int> C = A * B; // Printing the resulting matrix std::cout << "C =\n" << C << "\n"; \endcode // The output of this program is \code y = 16 2 C = ( -1 -1 ) ( 0 -4 ) \endcode // \n \section getting_started_complex_example A Complex Example // // The following example is much more sophisticated. It shows the implementation of the Conjugate // Gradient (CG) algorithm (http://en.wikipedia.org/wiki/Conjugate_gradient) by means of the // \b Blaze library: // // \image html cg.jpg // // In this example it is not important to understand the CG algorithm itself, but to see the // advantage of the API of the \b Blaze library. In the \b Blaze implementation we will use a // sparse matrix/dense vector multiplication for a 2D Poisson equation using \f$ N \times N \f$ // unknowns. It becomes apparent that the core of the algorithm is very close to the mathematical // formulation and therefore has huge advantages in terms of readability and maintainability, // while the performance of the code is close to the expected theoretical peak performance: \code const size_t NN( N*N ); blaze::CompressedMatrix<double,rowMajor> A( NN, NN ); blaze::DynamicVector<double,columnVector> x( NN, 1.0 ), b( NN, 0.0 ), r( NN ), p( NN ), Ap( NN ); double alpha, beta, delta; // ... Initializing the sparse matrix A // Performing the CG algorithm r = b - A * x; p = r; delta = (r,r); for( size_t iteration=0UL; iteration<iterations; ++iteration ) { Ap = A * p; alpha = delta / (p,Ap); x += alpha * p; r -= alpha * Ap; beta = (r,r); if( std::sqrt( beta ) < 1E-8 ) break; p = r + ( beta / delta ) * p; delta = beta; } \endcode // \n Hopefully this short tutorial gives a good first impression of how mathematical expressions // are formulated with \b Blaze. The following long tutorial, starting with \ref vector_types, // will cover all aspects of the \b Blaze math library, i.e. it will introduce all vector and // matrix types, all possible operations on vectors and matrices, and of course all possible // mathematical expressions. 
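//
// As a side note, the initialization of the sparse matrix \c A that is elided in the CG
// example above could, for instance, be realized as follows. This is only a sketch (it is
// not part of the original example), assuming the usual 5-point finite difference stencil
// with homogeneous Dirichlet boundary conditions:

   \code
   for( size_t i=0UL; i<N; ++i ) {
      for( size_t j=0UL; j<N; ++j ) {
         const size_t row( i*N + j );
         A(row,row) = 4.0;                        // Center of the stencil
         if( j > 0UL   ) A(row,row-1UL) = -1.0;   // Left neighbor
         if( j < N-1UL ) A(row,row+1UL) = -1.0;   // Right neighbor
         if( i > 0UL   ) A(row,row-N)   = -1.0;   // Lower neighbor
         if( i < N-1UL ) A(row,row+N)   = -1.0;   // Upper neighbor
      }
   }
   \endcode

// For large \c N it pays off to reserve the expected number of non-zero elements up front
// in order to avoid repeated reallocations during the setup.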
// // \n Previous: \ref configuration_and_installation &nbsp; &nbsp; Next: \ref vectors */ //************************************************************************************************* //**Vectors**************************************************************************************** /*!\page vectors Vectors // // \tableofcontents // // // \n \section vectors_general General Concepts // <hr> // // The \b Blaze library currently offers four dense vector types (\ref vector_types_static_vector, // \ref vector_types_dynamic_vector, \ref vector_types_hybrid_vector, and \ref vector_types_custom_vector) // and one sparse vector type (\ref vector_types_compressed_vector). All vectors can be specified // as either column vectors or row vectors: \code using blaze::DynamicVector; using blaze::columnVector; using blaze::rowVector; // Setup of the 3-dimensional dense column vector // // ( 1 ) // ( 2 ) // ( 3 ) // DynamicVector<int,columnVector> a{ 1, 2, 3 }; // Setup of the 3-dimensional dense row vector // // ( 4 5 6 ) // DynamicVector<int,rowVector> b{ 4, 5, 6 }; \endcode // Per default, all vectors in \b Blaze are column vectors: \code // Instantiation of a 3-dimensional column vector blaze::DynamicVector<int> c( 3UL ); \endcode // \n \section vectors_details Vector Details // <hr> // // - \ref vector_types // - \ref vector_operations // // // \n \section vectors_examples Examples // <hr> \code using blaze::StaticVector; using blaze::DynamicVector; using blaze::CompressedVector; using blaze::rowVector; using blaze::columnVector; StaticVector<int,6UL> a; // Instantiation of a 6-dimensional static column vector CompressedVector<int,rowVector> b; // Instantiation of a compressed row vector DynamicVector<int,columnVector> c; // Instantiation of a dynamic column vector // ... Resizing and initialization c = a + trans( b ); \endcode // \n Previous: \ref getting_started &nbsp; &nbsp; Next: \ref vector_types */ //************************************************************************************************* //**Vector Types*********************************************************************************** /*!\page vector_types Vector Types // // \tableofcontents // // // \n \section vector_types_static_vector StaticVector // <hr> // // The blaze::StaticVector class template is the representation of a fixed size vector with // statically allocated elements of arbitrary type. It can be included via the header file \code #include <blaze/math/StaticVector.h> \endcode // The type of the elements, the number of elements, and the transpose flag of the vector can // be specified via the three template parameters: \code template< typename Type, size_t N, bool TF > class StaticVector; \endcode // - \c Type: specifies the type of the vector elements. StaticVector can be used with any // non-cv-qualified, non-reference, non-pointer element type. // - \c N : specifies the total number of vector elements. It is expected that StaticVector is // only used for tiny and small vectors. // - \c TF : specifies whether the vector is a row vector (\c blaze::rowVector) or a column // vector (\c blaze::columnVector). The default value is \c blaze::columnVector. 
// // The blaze::StaticVector is perfectly suited for small to medium vectors whose size is known at // compile time: \code // Definition of a 3-dimensional integral column vector blaze::StaticVector<int,3UL> a; // Definition of a 4-dimensional single precision column vector blaze::StaticVector<float,4UL,blaze::columnVector> b; // Definition of a 6-dimensional double precision row vector blaze::StaticVector<double,6UL,blaze::rowVector> c; \endcode // \n \section vector_types_dynamic_vector DynamicVector // <hr> // // The blaze::DynamicVector class template is the representation of an arbitrarily sized vector // with dynamically allocated elements of arbitrary type. It can be included via the header file \code #include <blaze/math/DynamicVector.h> \endcode // The type of the elements and the transpose flag of the vector can be specified via the two // template parameters: \code template< typename Type, bool TF > class DynamicVector; \endcode // - \c Type: specifies the type of the vector elements. DynamicVector can be used with any // non-cv-qualified, non-reference, non-pointer element type. // - \c TF : specifies whether the vector is a row vector (\c blaze::rowVector) or a column // vector (\c blaze::columnVector). The default value is \c blaze::columnVector. // // The blaze::DynamicVector is the default choice for all kinds of dense vectors and the best // choice for medium to large vectors. Its size can be modified at runtime: \code // Definition of a 3-dimensional integral column vector blaze::DynamicVector<int> a( 3UL ); // Definition of a 4-dimensional single precision column vector blaze::DynamicVector<float,blaze::columnVector> b( 4UL ); // Definition of a double precision row vector with size 0 blaze::DynamicVector<double,blaze::rowVector> c; \endcode // \n \section vector_types_hybrid_vector HybridVector // <hr> // // The blaze::HybridVector class template combines the advantages of the blaze::StaticVector and // the blaze::DynamicVector class templates. It represents a fixed size vector with statically // allocated elements, but still can be dynamically resized (within the bounds of the available // memory). It can be included via the header file \code #include <blaze/math/HybridVector.h> \endcode // The type of the elements, the number of elements, and the transpose flag of the vector can // be specified via the three template parameters: \code template< typename Type, size_t N, bool TF > class HybridVector; \endcode // - \c Type: specifies the type of the vector elements. HybridVector can be used with any // non-cv-qualified, non-reference, non-pointer element type. // - \c N : specifies the maximum number of vector elements. It is expected that HybridVector // is only used for tiny and small vectors. // - \c TF : specifies whether the vector is a row vector (\c blaze::rowVector) or a column // vector (\c blaze::columnVector). The default value is \c blaze::columnVector. 
// // The blaze::HybridVector is a suitable choice for small to medium vectors, whose size is not // known at compile time or not fixed at runtime, but whose maximum size is known at compile // time: \code // Definition of a 3-dimensional integral column vector with a maximum size of 6 blaze::HybridVector<int,6UL> a( 3UL ); // Definition of a 4-dimensional single precision column vector with a maximum size of 16 blaze::HybridVector<float,16UL,blaze::columnVector> b( 4UL ); // Definition of a double precision row vector with size 0 and a maximum size of 6 blaze::HybridVector<double,6UL,blaze::rowVector> c; \endcode // \n \section vector_types_custom_vector CustomVector // <hr> // // The blaze::CustomVector class template provides the functionality to represent an external // array of elements of arbitrary type and a fixed size as a native \b Blaze dense vector data // structure. Thus in contrast to all other dense vector types a custom vector does not perform // any kind of memory allocation by itself, but it is provided with an existing array of elements // during construction. A custom vector can therefore be considered an alias to the existing // array. It can be included via the header file \code #include <blaze/math/CustomVector.h> \endcode // The type of the elements, the properties of the given array of elements and the transpose // flag of the vector can be specified via the following four template parameters: \code template< typename Type, bool AF, bool PF, bool TF > class CustomVector; \endcode // - Type: specifies the type of the vector elements. blaze::CustomVector can be used with // any non-cv-qualified, non-reference, non-pointer element type. // - AF : specifies whether the represented, external arrays are properly aligned with // respect to the available instruction set (SSE, AVX, ...) or not. // - PF : specifies whether the represented, external arrays are properly padded with // respect to the available instruction set (SSE, AVX, ...) or not. // - TF : specifies whether the vector is a row vector (\c blaze::rowVector) or a column // vector (\c blaze::columnVector). The default value is \c blaze::columnVector. 
// // The blaze::CustomVector is the right choice if any external array needs to be represented as // a \b Blaze dense vector data structure or if a custom memory allocation strategy needs to be // realized: \code using blaze::CustomVector; using blaze::aligned; using blaze::unaligned; using blaze::padded; using blaze::unpadded; // Definition of an unmanaged custom column vector for unaligned, unpadded integer arrays typedef CustomVector<int,unaligned,unpadded,columnVector> UnalignedUnpadded; std::vector<int> vec( 7UL ); UnalignedUnpadded a( &vec[0], 7UL ); // Definition of a managed custom column vector for unaligned but padded 'float' arrays typedef CustomVector<float,unaligned,padded,columnVector> UnalignedPadded; UnalignedPadded b( new float[16], 9UL, 16UL, blaze::ArrayDelete() ); // Definition of a managed custom row vector for aligned, unpadded 'double' arrays typedef CustomVector<double,aligned,unpadded,rowVector> AlignedUnpadded; AlignedUnpadded c( blaze::allocate<double>( 7UL ), 7UL, blaze::Deallocate() ); // Definition of a managed custom column vector for aligned, padded 'complex<double>' arrays typedef CustomVector<complex<double>,aligned,padded,columnVector> AlignedPadded; AlignedPadded d( allocate< complex<double> >( 8UL ), 5UL, 8UL, blaze::Deallocate() ); \endcode // In comparison with the remaining \b Blaze dense vector types blaze::CustomVector has several // special characteristics. All of these result from the fact that a custom vector is not // performing any kind of memory allocation, but instead is given an existing array of elements. // The following sections discuss all of these characteristics: // // -# <b>\ref vector_types_custom_vector_memory_management</b> // -# <b>\ref vector_types_custom_vector_copy_operations</b> // -# <b>\ref vector_types_custom_vector_alignment</b> // -# <b>\ref vector_types_custom_vector_padding</b> // // \n \subsection vector_types_custom_vector_memory_management Memory Management // // The blaze::CustomVector class template acts as an adaptor for an existing array of elements. As // such it provides everything that is required to use the array just like a native \b Blaze dense // vector data structure. However, this flexibility comes at the price that the user of a custom // vector is responsible for the resource management. // // When constructing a custom vector there are two choices: Either a user manually manages the // array of elements outside the custom vector, or alternatively passes the responsibility for // the memory management to an instance of CustomVector. In the second case the CustomVector // class employs shared ownership between all copies of the custom vector, which reference the // same array. // // The following examples give an impression of several possible types of custom vectors: \code using blaze::CustomVector; using blaze::ArrayDelete; using blaze::Deallocate; using blaze::allocate; using blaze::aligned; using blaze::unaligned; using blaze::padded; using blaze::unpadded; using blaze::columnVector; using blaze::rowVector; // Definition of a 3-dimensional custom vector with unaligned, unpadded and externally // managed integer array. Note that the std::vector must be guaranteed to outlive the // custom vector! std::vector<int> vec( 3UL ); CustomVector<int,unaligned,unpadded> a( &vec[0], 3UL ); // Definition of a custom row vector with size 3 for unaligned, unpadded integer arrays. 
// The responsibility for the memory management is passed to the custom vector by // providing a deleter of type 'blaze::ArrayDelete' that is used during the destruction // of the custom vector. CustomVector<int,unaligned,unpadded,rowVector> b( new int[3], 3UL, ArrayDelete() ); // Definition of a custom vector with size 3 and capacity 16 with aligned and padded // integer array. The memory management is passed to the custom vector by providing a // deleter of type 'blaze::Deallocate'. CustomVector<int,aligned,padded> c( allocate<int>( 16UL ), 3UL, 16UL, Deallocate() ); \endcode // It is possible to pass any type of deleter to the constructor. The deleter is only required // to provide a function call operator that can be passed the pointer to the managed array. As // an example the following code snippet shows the implementation of two native \b Blaze deleters // blaze::ArrayDelete and blaze::Deallocate: \code namespace blaze { struct ArrayDelete { template< typename Type > inline void operator()( Type ptr ) const { boost::checked_array_delete( ptr ); } }; struct Deallocate { template< typename Type > inline void operator()( Type ptr ) const { deallocate( ptr ); } }; } // namespace blaze \endcode // \n \subsection vector_types_custom_vector_copy_operations Copy Operations // // As with all dense vectors it is possible to copy construct a custom vector: \code using blaze::CustomVector; using blaze::unaligned; using blaze::unpadded; typedef CustomVector<int,unaligned,unpadded> CustomType; std::vector<int> vec( 5UL, 10 ); // Vector of 5 integers of the value 10 CustomType a( &vec[0], 5UL ); // Represent the std::vector as Blaze dense vector a[1] = 20; // Also modifies the std::vector CustomType b( a ); // Creating a copy of vector a b[2] = 20; // Also affects vector a and the std::vector \endcode // It is important to note that a custom vector acts as a reference to the specified array. Thus // the result of the copy constructor is a new custom vector that is referencing and representing // the same array as the original custom vector. In case a deleter has been provided to the first // custom vector, both vectors share the responsibility to destroy the array when the last vector // goes out of scope. // // In contrast to copy construction, just as with references, copy assignment does not change // which array is referenced by the custom vector, but modifies the values of the array: \code std::vector<int> vec2( 5UL, 4 ); // Vector of 5 integers of the value 4 CustomType c( &vec2[0], 5UL ); // Represent the std::vector as Blaze dense vector a = c; // Copy assignment: Set all values of vector a and b to 4. \endcode // \n \subsection vector_types_custom_vector_alignment Alignment // // In case the custom vector is specified as \c aligned the passed array must be guaranteed to // be aligned according to the requirements of the used instruction set (SSE, AVX, ...). For // instance, if AVX is active an array of integers must be 32-byte aligned: \code using blaze::CustomVector; using blaze::Deallocate; using blaze::aligned; using blaze::unpadded; int* array = blaze::allocate<int>( 5UL ); // Needs to be 32-byte aligned CustomVector<int,aligned,unpadded> a( array, 5UL, Deallocate() ); \endcode // In case the alignment requirements are violated, a \c std::invalid_argument exception is // thrown. // // \n \subsection vector_types_custom_vector_padding Padding // // Adding padding elements to the end of an array can have a significant impact on the performance. 
// For instance, assuming that AVX is available, then two aligned, padded, 3-dimensional vectors // of double precision values can be added via a single SIMD addition operation: \code using blaze::CustomVector; using blaze::Deallocate; using blaze::allocate; using blaze::aligned; using blaze::padded; typedef CustomVector<double,aligned,padded> CustomType; // Creating padded custom vectors of size 3 and a capacity of 4 CustomType a( allocate<double>( 4UL ), 3UL, 4UL, Deallocate() ); CustomType b( allocate<double>( 4UL ), 3UL, 4UL, Deallocate() ); CustomType c( allocate<double>( 4UL ), 3UL, 4UL, Deallocate() ); // ... Initialization c = a + b; // AVX-based vector addition \endcode // In this example, maximum performance is possible. However, in case no padding elements are // inserted, a scalar addition has to be used: \code using blaze::CustomVector; using blaze::Deallocate; using blaze::allocate; using blaze::aligned; using blaze::unpadded; typedef CustomVector<double,aligned,unpadded> CustomType; // Creating unpadded custom vector of size 3 CustomType a( allocate<double>( 3UL ), 3UL, Deallocate() ); CustomType b( allocate<double>( 3UL ), 3UL, Deallocate() ); CustomType c( allocate<double>( 3UL ), 3UL, Deallocate() ); // ... Initialization c = a + b; // Scalar vector addition \endcode // Note the different number of constructor parameters for unpadded and padded custom vectors: // In contrast to unpadded vectors, where during the construction only the size of the array // has to be specified, during the construction of a padded custom vector it is additionally // necessary to explicitly specify the capacity of the array. // // The number of padding elements is required to be sufficient with respect to the available // instruction set: In case of an aligned padded custom vector the added padding elements must // guarantee that the capacity is a multiple of the SIMD vector width. In case of unaligned // padded vectors \f$ N-1 \f$ additional padding elements are required, where \f$ N \f$ is // the SIMD vector width. In case the padding is insufficient with respect to the available // instruction set, a \c std::invalid_argument exception is thrown. // // Please also note that \b Blaze will zero initialize the padding elements in order to achieve // maximum performance! // // // \n \section vector_types_compressed_vector CompressedVector // <hr> // // The blaze::CompressedVector class is the representation of an arbitrarily sized sparse // vector, which stores only non-zero elements of arbitrary type. It can be included via the // header file \code #include <blaze/math/CompressedVector.h> \endcode // The type of the elements and the transpose flag of the vector can be specified via the two // template parameters: \code template< typename Type, bool TF > class CompressedVector; \endcode // - \c Type: specifies the type of the vector elements. CompressedVector can be used with any // non-cv-qualified, non-reference, non-pointer element type. // - \c TF : specifies whether the vector is a row vector (\c blaze::rowVector) or a column // vector (\c blaze::columnVector). The default value is \c blaze::columnVector. 
// // The blaze::CompressedVector is the right choice for all kinds of sparse vectors: \code // Definition of a 3-dimensional integral column vector blaze::CompressedVector<int> a( 3UL ); // Definition of a 4-dimensional single precision column vector with capacity for 3 non-zero elements blaze::CompressedVector<float,blaze::columnVector> b( 4UL, 3UL ); // Definition of a double precision row vector with size 0 blaze::CompressedVector<double,blaze::rowVector> c; \endcode // \n Previous: \ref vectors &nbsp; &nbsp; Next: \ref vector_operations */ //************************************************************************************************* //**Vector Operations****************************************************************************** /*!\page vector_operations Vector Operations // // \tableofcontents // // // \n \section vector_operations_constructors Constructors // <hr> // // Instantiating and setting up a vector is very easy and intuitive. However, there are a few // rules to take care of: // - In case the last template parameter (the transpose flag) is omitted, the vector is per // default a column vector. // - The elements of a \c StaticVector or \c HybridVector are default initialized (i.e. built-in // data types are initialized to 0, class types are initialized via the default constructor). // - Newly allocated elements of a \c DynamicVector or \c CompressedVector remain uninitialized // if they are of built-in type and are default constructed if they are of class type. // // \n \subsection vector_operations_default_construction Default Construction \code using blaze::StaticVector; using blaze::DynamicVector; using blaze::CompressedVector; // All vectors can be default constructed. Whereas the size // of StaticVectors is fixed via the second template parameter, // the initial size of a default constructed DynamicVector or // CompressedVector is 0. StaticVector<int,2UL> v1; // Instantiation of a 2D integer column vector. // All elements are initialized to 0. StaticVector<long,3UL,columnVector> v2; // Instantiation of a 3D long integer column vector. // Again, all elements are initialized to 0L. DynamicVector<float> v3; // Instantiation of a dynamic single precision column // vector of size 0. DynamicVector<double,rowVector> v4; // Instantiation of a dynamic double precision row // vector of size 0. CompressedVector<int> v5; // Instantiation of a compressed integer column // vector of size 0. CompressedVector<double,rowVector> v6; // Instantiation of a compressed double precision row // vector of size 0. \endcode // \n \subsection vector_operations_size_construction Construction with Specific Size // // The \c DynamicVector, \c HybridVector and \c CompressedVector classes offer a constructor that // allows to immediately give the vector the required size. Whereas both dense vectors (i.e. // \c DynamicVector and \c HybridVector) use this information to allocate memory for all vector // elements, \c CompressedVector merely acquires the size but remains empty. \code DynamicVector<int,columnVector> v7( 9UL ); // Instantiation of an integer dynamic column vector // of size 9. The elements are NOT initialized! HybridVector< complex<float>, 5UL > v8( 2UL ); // Instantiation of a column vector with two single // precision complex values. The elements are // default constructed. CompressedVector<int,rowVector> v9( 10UL ); // Instantiation of a compressed row vector with // size 10. Initially, the vector provides no // capacity for non-zero elements. 
\endcode // \n \subsection vector_operations_initialization_constructors Initialization Constructors // // All dense vector classes offer a constructor that allows for a direct, homogeneous initialization // of all vector elements. In contrast, for sparse vectors the predicted number of non-zero elements // can be specified: \code StaticVector<int,3UL,rowVector> v10( 2 ); // Instantiation of a 3D integer row vector. // All elements are initialized to 2. DynamicVector<float> v11( 3UL, 7.0F ); // Instantiation of a dynamic single precision // column vector of size 3. All elements are // set to 7.0F. CompressedVector<float,rowVector> v12( 15UL, 3UL ); // Instantiation of a single precision row // vector of size 15, which provides enough // space for at least 3 non-zero elements. \endcode // \n \subsection vector_operations_array_construction Array Construction // // Alternatively, all dense vector classes offer a constructor for an initialization with a dynamic // or static array. If the vector is initialized from a dynamic array, the constructor expects the // actual size of the array as the first argument and the array as the second argument. In case of a static // array, the fixed size of the array is used: \code const std::unique_ptr<double[]> array1( new double[2] ); // ... Initialization of the dynamic array blaze::StaticVector<double,2UL> v13( 2UL, array1.get() ); int array2[4] = { 4, -5, -6, 7 }; blaze::StaticVector<int,4UL> v14( array2 ); \endcode // \n \subsection vector_operations_initializer_list_construction Initializer List Construction // // In addition, all dense vector classes can be directly initialized by means of an initializer // list: \code blaze::DynamicVector<float> v15{ 1.0F, 2.0F, 3.0F, 4.0F }; \endcode // \n \subsection vector_operations_copy_construction Copy Construction // // All dense and sparse vectors can be created as the copy of any other dense or sparse vector // with the same transpose flag (i.e. blaze::rowVector or blaze::columnVector). \code StaticVector<int,9UL,columnVector> v16( v7 ); // Instantiation of the dense column vector v16 // as copy of the dense column vector v7. DynamicVector<int,rowVector> v17( v9 ); // Instantiation of the dense row vector v17 as // copy of the sparse row vector v9. CompressedVector<int,columnVector> v18( v1 ); // Instantiation of the sparse column vector v18 // as copy of the dense column vector v1. CompressedVector<float,rowVector> v19( v12 ); // Instantiation of the sparse row vector v19 as // copy of the row vector v12. \endcode // Note that it is not possible to create a \c StaticVector as a copy of a vector with a different // size: \code StaticVector<int,5UL,columnVector> v23( v7 ); // Runtime error: Size does not match! StaticVector<int,4UL,rowVector> v24( v10 ); // Compile time error: Size does not match! \endcode // \n \section vector_operations_assignment Assignment // <hr> // // There are several types of assignment to dense and sparse vectors: // \ref vector_operations_homogeneous_assignment, \ref vector_operations_array_assignment, // \ref vector_operations_copy_assignment, and \ref vector_operations_compound_assignment. // // \n \subsection vector_operations_homogeneous_assignment Homogeneous Assignment // // Sometimes it may be necessary to assign the same value to all elements of a dense vector. 
// For this purpose, the assignment operator can be used: \code blaze::StaticVector<int,3UL> v1; blaze::DynamicVector<double> v2; // Setting all integer elements of the StaticVector to 2 v1 = 2; // Setting all double precision elements of the DynamicVector to 5.0 v2 = 5.0; \endcode // \n \subsection vector_operations_array_assignment Array Assignment // // Dense vectors can also be assigned a static array: \code blaze::StaticVector<float,2UL> v1; blaze::DynamicVector<double,rowVector> v2; float array1[2] = { 1.0F, 2.0F }; double array2[5] = { 2.1, 4.0, -1.7, 8.6, -7.2 }; v1 = array1; v2 = array2; \endcode // \n \subsection vector_operations_initializer_list_assignment Initializer List Assignment // // Alternatively, it is possible to directly assign an initializer list to a dense vector: \code blaze::StaticVector<float,2UL> v1; blaze::DynamicVector<double,rowVector> v2; v1 = { 1.0F, 2.0F }; v2 = { 2.1, 4.0, -1.7, 8.6, -7.2 }; \endcode // \n \subsection vector_operations_copy_assignment Copy Assignment // // For all vector types it is generally possible to assign another vector with the same transpose // flag (i.e. blaze::columnVector or blaze::rowVector). Note that in case of \c StaticVectors, the // assigned vector is required to have the same size as the \c StaticVector since the size of a // \c StaticVector cannot be adapted! \code blaze::StaticVector<int,3UL,columnVector> v1; blaze::DynamicVector<int,columnVector> v2( 3UL ); blaze::DynamicVector<float,columnVector> v3( 5UL ); blaze::CompressedVector<int,columnVector> v4( 3UL ); blaze::CompressedVector<float,rowVector> v5( 3UL ); // ... Initialization of the vectors v1 = v2; // OK: Assignment of a 3D dense column vector to another 3D dense column vector v1 = v4; // OK: Assignment of a 3D sparse column vector to a 3D dense column vector v1 = v3; // Runtime error: Cannot assign a 5D vector to a 3D static vector v1 = v5; // Compilation error: Cannot assign a row vector to a column vector \endcode // \n \subsection vector_operations_compound_assignment Compound Assignment // // Next to plain assignment, it is also possible to use addition assignment, subtraction // assignment, and multiplication assignment. Note however, that in contrast to plain assignment // the size and the transpose flag of the vectors have to be equal in order to be able to perform a // compound assignment. \code blaze::StaticVector<int,5UL,columnVector> v1; blaze::DynamicVector<int,columnVector> v2( 5UL ); blaze::CompressedVector<float,columnVector> v3( 7UL ); blaze::DynamicVector<float,rowVector> v4( 7UL ); blaze::CompressedVector<float,rowVector> v5( 7UL ); // ... Initialization of the vectors v1 += v2; // OK: Addition assignment between two column vectors of the same size v1 += v3; // Runtime error: No compound assignment between vectors of different size v1 -= v4; // Compilation error: No compound assignment between vectors of different transpose flag v4 *= v5; // OK: Multiplication assignment between two row vectors of the same size \endcode // \n \section vector_operations_element_access Element Access // <hr> // // The easiest and most intuitive way to access a dense or sparse vector is via the subscript // operator. The indices to access a vector are zero-based: \code blaze::DynamicVector<int> v1( 5UL ); v1[0] = 1; v1[1] = 3; // ... 
blaze::CompressedVector<float> v2( 5UL ); v2[2] = 7.3F; v2[4] = -1.4F; \endcode // Whereas using the subscript operator on a dense vector only accesses the already existing // element, accessing an element of a sparse vector via the subscript operator potentially // inserts the element into the vector and may therefore be more expensive. Consider the // following example: \code blaze::CompressedVector<int> v1( 10UL ); for( size_t i=0UL; i<v1.size(); ++i ) { ... = v1[i]; } \endcode // Although the compressed vector is only used for read access within the for loop, using the // subscript operator temporarily inserts 10 non-zero elements into the vector. Therefore, all // vectors (sparse as well as dense) offer an alternate way via the \c begin(), \c cbegin(), // \c end(), and \c cend() functions to traverse the currently contained elements by iterators. // In case of non-const vectors, \c begin() and \c end() return an \c Iterator, which allows a // manipulation of the non-zero value; in case of a constant vector or in case \c cbegin() or // \c cend() are used, a \c ConstIterator is returned: \code using blaze::CompressedVector; CompressedVector<int> v1( 10UL ); // ... Initialization of the vector // Traversing the vector by Iterator for( CompressedVector<int>::Iterator it=v1.begin(); it!=v1.end(); ++it ) { it->value() = ...; // OK: Write access to the value of the non-zero element. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the non-zero element. } // Traversing the vector by ConstIterator for( CompressedVector<int>::ConstIterator it=v1.cbegin(); it!=v1.cend(); ++it ) { it->value() = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the non-zero element. } \endcode // Note that \c begin(), \c cbegin(), \c end(), and \c cend() are also available as free functions: \code for( CompressedVector<int>::Iterator it=begin( v1 ); it!=end( v1 ); ++it ) { // ... } for( CompressedVector<int>::ConstIterator it=cbegin( v1 ); it!=cend( v1 ); ++it ) { // ... } \endcode // \n \section vector_operations_element_insertion Element Insertion // <hr> // // In contrast to dense vectors, which store all elements independent of their value and // offer direct access to all elements, sparse vectors only store the non-zero elements contained // in the vector. Therefore it is necessary to explicitly add elements to the vector. The first // option to add elements to a sparse vector is the subscript operator: \code using blaze::CompressedVector; CompressedVector<int> v1( 3UL ); v1[1] = 2; \endcode // In case the element at the given index is not yet contained in the vector, it is automatically // inserted. Otherwise the old value is replaced by the new value 2. The operator returns a // reference to the sparse vector element.\n // An alternative is the \c set() function: In case the element is not yet contained in the vector // the element is inserted, else the element's value is modified: \code // Insert or modify the value at index 3 v1.set( 3, 1 ); \endcode // However, insertion of elements can be better controlled via the \c insert() function. 
In contrast // to the subscript operator and the \c set() function it emits an exception in case the element is // already contained in the vector. In order to check for this case, the \c find() function can be // used: \code // In case the element at index 4 is not yet contained in the vector it is inserted // with a value of 6. if( v1.find( 4 ) == v1.end() ) v1.insert( 4, 6 ); \endcode // Although the \c insert() function is very flexible, due to performance reasons it is not suited // for the setup of large sparse vectors. A very efficient, yet also very low-level way to fill // a sparse vector is the \c append() function. It requires the sparse vector to provide enough // capacity to insert a new element. Additionally, the index of the new element must be larger // than the index of the previous element. Violating these conditions results in undefined // behavior! \code v1.reserve( 10 ); // Reserving space for 10 non-zero elements v1.append( 5, -2 ); // Appending the element -2 at index 5 v1.append( 6, 4 ); // Appending the element 4 at index 6 // ... \endcode // \n \section vector_operations_member_functions Member Functions // <hr> // // \subsection vector_operations_size .size() // // Via the \c size() member function, the current size of a dense or sparse vector can be queried: \code // Instantiating a dynamic vector with size 10 blaze::DynamicVector<int> v1( 10UL ); v1.size(); // Returns 10 // Instantiating a compressed vector with size 12 and capacity for 3 non-zero elements blaze::CompressedVector<double> v2( 12UL, 3UL ); v2.size(); // Returns 12 \endcode // Alternatively, the free function \c size() can be used to query the current size of a vector. // In contrast to the member function, the free function can also be used to query the size of // vector expressions: \code size( v1 ); // Returns 10, i.e. has the same effect as the member function size( v2 ); // Returns 12, i.e. has the same effect as the member function blaze::DynamicMatrix<int> A( 15UL, 12UL ); size( A * v2 ); // Returns 15, i.e. the size of the resulting vector \endcode // \n \subsection vector_operations_capacity .capacity() // // Via the \c capacity() (member) function the internal capacity of a dense or sparse vector // can be queried. Note that the capacity of a vector doesn't have to be equal to the size // of a vector. In case of a dense vector the capacity will always be greater than or equal to // the size of the vector, in case of a sparse vector the capacity may even be less than // the size. \code v1.capacity(); // Returns at least 10 \endcode // For symmetry reasons, there is also a free function \c capacity() available that can be used // to query the capacity: \code capacity( v1 ); // Returns at least 10, i.e. has the same effect as the member function \endcode // Note, however, that it is not possible to query the capacity of a vector expression: \code capacity( A * v1 ); // Compilation error! \endcode // \n \subsection vector_operations_nonzeros .nonZeros() // // For both dense and sparse vectors the number of non-zero elements can be determined via the // \c nonZeros() member function. Sparse vectors directly return their number of non-zero // elements, dense vectors traverse their elements and count the number of non-zero elements. 
\code v1.nonZeros(); // Returns the number of non-zero elements in the dense vector v2.nonZeros(); // Returns the number of non-zero elements in the sparse vector \endcode // There is also a free function \c nonZeros() available to query the current number of non-zero // elements: \code nonZeros( v1 ); // Returns the number of non-zero elements in the dense vector nonZeros( v2 ); // Returns the number of non-zero elements in the sparse vector \endcode // The free \c nonZeros() function can also be used to query the number of non-zero elements in // a vector expression. However, the result is not the exact number of non-zero elements, but // may be a rough estimation: \code nonZeros( A * v1 ); // Estimates the number of non-zero elements in the vector expression \endcode // \n \subsection vector_operations_resize_reserve .resize() / .reserve() // // The size of a \c StaticVector is fixed by the second template parameter and a \c CustomVector // cannot be resized. In contrast, the size of \c DynamicVectors, \c HybridVectors as well as // \c CompressedVectors can be changed via the \c resize() function: \code using blaze::DynamicVector; using blaze::CompressedVector; DynamicVector<int,columnVector> v1; CompressedVector<int,rowVector> v2( 4 ); v2[1] = -2; v2[3] = 11; // Adapting the size of the dynamic and compressed vectors. The (optional) second parameter // specifies whether the existing elements should be preserved. Per default, the existing // elements are not preserved. v1.resize( 5UL ); // Resizing vector v1 to 5 elements. Elements of built-in type remain // uninitialized, elements of class type are default constructed. v1.resize( 3UL, false ); // Resizing vector v1 to 3 elements. The old elements are lost, the // new elements are NOT initialized! v2.resize( 8UL, true ); // Resizing vector v2 to 8 elements. The old elements are preserved. v2.resize( 5UL, false ); // Resizing vector v2 to 5 elements. The old elements are lost. \endcode // Note that resizing a vector invalidates all existing views (see e.g. \ref views_subvectors) // on the vector: \code typedef blaze::DynamicVector<int,rowVector> VectorType; typedef blaze::Subvector<VectorType> SubvectorType; VectorType v1( 10UL ); // Creating a dynamic vector of size 10 SubvectorType sv = subvector( v1, 2UL, 5UL ); // Creating a view on the range [2..6] v1.resize( 6UL ); // Resizing the vector invalidates the view \endcode // When the internal capacity of a vector is no longer sufficient, the allocation of a larger // chunk of memory is triggered. In order to avoid frequent reallocations, the \c reserve() // function can be used up front to set the internal capacity: \code blaze::DynamicVector<int> v1; v1.reserve( 100 ); v1.size(); // Returns 0 v1.capacity(); // Returns at least 100 \endcode // Note that the size of the vector remains unchanged, but only the internal capacity is set // according to the specified value! // // // \n \section vector_operations_free_functions Free Functions // <hr> // // \subsection vector_operations_reset_clear reset() / clear() // // In order to reset all elements of a vector, the \c reset() function can be used: \code // Setup of a single precision column vector, whose elements are initialized with 2.0F. blaze::DynamicVector<float> v1( 3UL, 2.0F ); // Resetting all elements to 0.0F. Only the elements are reset, the size of the vector is unchanged. 
reset( v1 ); // Resetting all elements v1.size(); // Returns 3: size and capacity remain unchanged \endcode // In order to return a vector to its default state (i.e. the state of a default constructed // vector), the \c clear() function can be used: \code // Setup of a single precision column vector, whose elements are initialized with -1.0F. blaze::DynamicVector<float> v1( 5, -1.0F ); // Resetting the entire vector. clear( v1 ); // Resetting the entire vector v1.size(); // Returns 0: size is reset, but capacity remains unchanged \endcode // Note that resetting or clearing both dense and sparse vectors does not change the capacity // of the vectors. // // // \n \subsection vector_operations_isnan isnan() // // The \c isnan() function provides the means to check a dense or sparse vector for not-a-number // elements: \code blaze::DynamicVector<double> a; // ... Resizing and initialization if( isnan( a ) ) { ... } \endcode \code blaze::CompressedVector<double> a; // ... Resizing and initialization if( isnan( a ) ) { ... } \endcode // If at least one element of the vector is not-a-number, the function returns \c true, otherwise // it returns \c false. Please note that this function only works for vectors with floating point // elements. The attempt to use it for a vector with a non-floating point element type results in // a compile time error. // // // \n \subsection vector_operations_isdefault isDefault() // // The \c isDefault() function returns whether the given dense or sparse vector is in default state: \code blaze::HybridVector<int,20UL> a; // ... Resizing and initialization if( isDefault( a ) ) { ... } \endcode // A vector is in default state if it appears to just have been default constructed. All resizable // vectors (\c HybridVector, \c DynamicVector, or \c CompressedVector) and \c CustomVector are // in default state if their size is equal to zero. A non-resizable vector (\c StaticVector, all // subvectors, rows, and columns) is in default state if all its elements are in default state. // For instance, in case the vector is instantiated for a built-in integral or floating point data // type, the function returns \c true in case all vector elements are 0 and \c false in case any // vector element is not 0. // // // \n \subsection vector_operations_isUniform isUniform() // // In order to check if all vector elements are identical, the \c isUniform() function can be used: \code blaze::DynamicVector<int> a; // ... Resizing and initialization if( isUniform( a ) ) { ... } \endcode // Note that in case of sparse vectors the zero elements are also taken into account! // // // \n \subsection vector_operations_min_max min() / max() // // The \c min() and the \c max() functions return the smallest and largest element of the given // dense or sparse vector, respectively: \code blaze::StaticVector<int,4UL,rowVector> a{ -5, 2, 7, 4 }; blaze::StaticVector<int,4UL,rowVector> b{ -5, 2, -7, -4 }; min( a ); // Returns -5 min( b ); // Returns -7 max( a ); // Returns 7 max( b ); // Returns 2 \endcode // In case the vector currently has a size of 0, both functions return 0. Additionally, in case // a given sparse vector is not completely filled, the zero elements are taken into account. For // example: the following compressed vector has only 2 non-zero elements. 
However, the minimum // of this vector is 0: \code blaze::CompressedVector<int> c( 4UL, 2UL ); c[0] = 1; c[2] = 3; min( c ); // Returns 0 \endcode // Also note that the \c min() and \c max() functions can be used to compute the smallest and // largest element of a vector expression: \code min( a + b + c ); // Returns -9, i.e. the smallest value of the resulting vector max( a - b - c ); // Returns 11, i.e. the largest value of the resulting vector \endcode // \n \subsection vector_operators_abs abs() // // The \c abs() function can be used to compute the absolute values of each element of a vector. // For instance, the following computation \code blaze::StaticVector<int,3UL,rowVector> a{ -1, 2, -3 }; blaze::StaticVector<int,3UL,rowVector> b( abs( a ) ); \endcode // results in the vector \f$ b = \left(\begin{array}{*{1}{c}} 1 \\ 2 \\ 3 \\ \end{array}\right)\f$ // \n \subsection vector_operators_floor_ceil floor() / ceil() // // The \c floor() and \c ceil() functions can be used to round down/up each element of a vector, // respectively: \code blaze::StaticVector<double,3UL,rowVector> a, b; b = floor( a ); // Rounding down each element of the vector b = ceil( a ); // Rounding up each element of the vector \endcode // \n \subsection vector_operators_conj conj() // // The \c conj() function can be applied on a dense or sparse vector to compute the complex // conjugate of each element of the vector: \code using blaze::StaticVector; typedef std::complex<double> cplx; // Creating the vector // ( (-2,-1) ) // ( ( 1, 1) ) StaticVector<cplx,2UL> a{ cplx(-2.0,-1.0), cplx(1.0,1.0) }; // Computing the vector of complex conjugates // ( (-2, 1) ) // ( ( 1,-1) ) StaticVector<cplx,2UL> b; b = conj( a ); \endcode // Additionally, vectors can be conjugated in-place via the \c conjugate() function: \code blaze::DynamicVector<cplx> c( 5UL ); conjugate( c ); // In-place conjugate operation. c = conj( c ); // Same as above \endcode // \n \subsection vector_operators_real real() // // The \c real() function can be used on a dense or sparse vector to extract the real part of // each element of the vector: \code using blaze::StaticVector; typedef std::complex<double> cplx; // Creating the vector // ( (-2,-1) ) // ( ( 1, 1) ) StaticVector<cplx,2UL> a{ cplx(-2.0,-1.0), cplx(1.0,1.0) }; // Extracting the real part of each vector element // ( -2 ) // ( 1 ) StaticVector<double,2UL> b; b = real( a ); \endcode // \n \subsection vector_operators_imag imag() // // The \c imag() function can be used on a dense or sparse vector to extract the imaginary part // of each element of the vector: \code using blaze::StaticVector; typedef std::complex<double> cplx; // Creating the vector // ( (-2,-1) ) // ( ( 1, 1) ) StaticVector<cplx,2UL> a{ cplx(-2.0,-1.0), cplx(1.0,1.0) }; // Extracting the imaginary part of each vector element // ( -1 ) // ( 1 ) StaticVector<double,2UL> b; b = imag( a ); \endcode // \n \subsection vector_operations_sqrt sqrt() / invsqrt() // // Via the \c sqrt() and \c invsqrt() functions the (inverse) square root of each element of a // vector can be computed: \code blaze::DynamicVector<double> a, b, c; b = sqrt( a ); // Computes the square root of each element c = invsqrt( a ); // Computes the inverse square root of each element \endcode // Note that in case of sparse vectors only the non-zero elements are taken into account! 
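//
// As a concrete illustration of this remark (a minimal sketch, not taken from the original
// tutorial), applying \c sqrt() to a sparse vector only touches the stored non-zero
// elements, while all zero elements remain zero:

   \code
   blaze::CompressedVector<double> a( 5UL );
   a[1] = 4.0;
   a[3] = 9.0;

   blaze::CompressedVector<double> b;
   b = sqrt( a );  // b holds 2.0 at index 1 and 3.0 at index 3; all other elements are 0
   \endcode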
// // // \n \subsection vector_operations_cbrt cbrt() / invcbrt() // // The \c cbrt() and \c invcbrt() functions can be used to compute the (inverse) cubic root // of each element of a vector: \code blaze::HybridVector<double,3UL> a, b, c; b = cbrt( a ); // Computes the cubic root of each element c = invcbrt( a ); // Computes the inverse cubic root of each element \endcode // Note that in case of sparse vectors only the non-zero elements are taken into account! // // // \n \subsection vector_operations_clip clip() // // The \c clip() function can be used to restrict all elements of a vector to a specific range: \code blaze::DynamicVector<double> a, b; b = clip( a, -1.0, 1.0 ); // Restrict all elements to the range [-1..1] \endcode // Note that in case of sparse vectors only the non-zero elements are taken into account! // // // \n \subsection vector_operations_pow pow() // // The \c pow() function can be used to raise each element of a vector to a given exponent: \code blaze::StaticVector<double,3UL> a, b; b = pow( a, 1.2 ); // Raises each element to the exponent 1.2 \endcode // \n \subsection vector_operations_exp exp() // // \c exp() computes the base e exponential of each element of a vector: \code blaze::DynamicVector<double> a, b; b = exp( a ); // Computes the base e exponential of each element \endcode // Note that in case of sparse vectors only the non-zero elements are taken into account! // // // \n \subsection vector_operations_log log() / log10() // // The \c log() and \c log10() functions can be used to compute the natural and common logarithm // of each element of a vector: \code blaze::StaticVector<double,3UL> a, b; b = log( a ); // Computes the natural logarithm of each element b = log10( a ); // Computes the common logarithm of each element \endcode // \n \subsection vector_operations_trigonometric_functions sin() / cos() / tan() / asin() / acos() / atan() // // The following trigonometric functions are available for both dense and sparse vectors: \code blaze::DynamicVector<double> a, b; b = sin( a ); // Computes the sine of each element of the vector b = cos( a ); // Computes the cosine of each element of the vector b = tan( a ); // Computes the tangent of each element of the vector b = asin( a ); // Computes the inverse sine of each element of the vector b = acos( a ); // Computes the inverse cosine of each element of the vector b = atan( a ); // Computes the inverse tangent of each element of the vector \endcode // Note that in case of sparse vectors only the non-zero elements are taken into account! // // // \n \subsection vector_operations_hyperbolic_functions sinh() / cosh() / tanh() / asinh() / acosh() / atanh() // // The following hyperbolic functions are available for both dense and sparse vectors: \code blaze::DynamicVector<double> a, b; b = sinh( a ); // Computes the hyperbolic sine of each element of the vector b = cosh( a ); // Computes the hyperbolic cosine of each element of the vector b = tanh( a ); // Computes the hyperbolic tangent of each element of the vector b = asinh( a ); // Computes the inverse hyperbolic sine of each element of the vector b = acosh( a ); // Computes the inverse hyperbolic cosine of each element of the vector b = atanh( a ); // Computes the inverse hyperbolic tangent of each element of the vector \endcode // Note that in case of sparse vectors only the non-zero elements are taken into account!
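// Since all of these element-wise functions return vector expressions, they can be freely // combined with each other and with the usual arithmetic operators. The following small sketch // (the vectors and their values are assumptions of this example) nests several of the functions // shown above in a single expression: \code blaze::DynamicVector<double> a( 100UL, 0.5 ), b; b = abs( sin( a ) ) + 2.0 * cos( a ); // Element-wise functions can be nested and combined \endcode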
// // // \n \subsection vector_operations_erf erf() / erfc() // // The \c erf() and \c erfc() functions compute the (complementary) error function of each // element of a vector: \code blaze::StaticVector<double,3UL,rowVector> a, b; b = erf( a ); // Computes the error function of each element b = erfc( a ); // Computes the complementary error function of each element \endcode // Note that in case of sparse vectors only the non-zero elements are taken into account! // // // \n \subsection vector_operations_foreach forEach() // // Via the \c forEach() function it is possible to execute custom operations on dense and sparse // vectors. For instance, the following example demonstrates a custom square root computation via // a lambda: \code blaze::DynamicVector<double> a, b; b = forEach( a, []( double d ) { return std::sqrt( d ); } ); \endcode // Although the computation can be parallelized it is not vectorized and thus cannot perform at // peak performance. However, it is also possible to create vectorized custom operations. See // \ref custom_operations for a detailed overview of the possibilities of custom operations. // // // \n \subsection vector_operations_length length() / sqrLength() // // In order to calculate the length of a vector, both the \c length() and \c sqrLength() functions // can be used: \code blaze::StaticVector<float,3UL,rowVector> v{ -1.2F, 2.7F, -2.3F }; const float len = length ( v ); // Computes the current length of the vector const float sqrlen = sqrLength( v ); // Computes the square length of the vector \endcode // Note that both functions can only be used for vectors with built-in or complex element type! // // // \n \subsection vector_operations_vector_transpose trans() // // As already mentioned, vectors can either be column vectors (blaze::columnVector) or row vectors // (blaze::rowVector). A column vector cannot be assigned to a row vector and vice versa. However, // vectors can be transposed via the \c trans() function: \code blaze::DynamicVector<int,columnVector> v1( 4UL ); blaze::CompressedVector<int,rowVector> v2( 4UL ); v1 = v2; // Compilation error: Cannot assign a row vector to a column vector v1 = trans( v2 ); // OK: Transposing the row vector to a column vector and assigning it // to the column vector v1 v2 = trans( v1 ); // OK: Transposing the column vector v1 and assigning it to the row vector v2 v1 += trans( v2 ); // OK: Addition assignment of two column vectors \endcode // \n \subsection vector_operations_conjugate_transpose ctrans() // // It is also possible to compute the conjugate transpose of a vector. This operation is available // via the \c ctrans() function: \code blaze::CompressedVector< complex<float>, rowVector > v1( 4UL ); blaze::DynamicVector< complex<float>, columnVector > v2( 4UL ); v1 = ctrans( v2 ); // Compute the conjugate transpose vector \endcode // Note that the \c ctrans() function has the same effect as manually applying the \c conj() and // \c trans() functions in any order: \code v1 = trans( conj( v2 ) ); // Computing the conjugate transpose vector v1 = conj( trans( v2 ) ); // Computing the conjugate transpose vector \endcode // \n \subsection vector_operations_normalize normalize() // // The \c normalize() function can be used to scale any non-zero vector to a length of 1. In // case the vector does not contain a single non-zero element (i.e. is a zero vector), the // \c normalize() function returns a zero vector.
\code blaze::DynamicVector<float,columnVector> v1( 10UL ); blaze::CompressedVector<double,columnVector> v2( 12UL ); v1 = normalize( v1 ); // Normalizing the dense vector v1 length( v1 ); // Returns 1 (or 0 in case of a zero vector) v1 = normalize( v2 ); // Assigning v1 the normalized vector v2 length( v1 ); // Returns 1 (or 0 in case of a zero vector) \endcode // Note that the \c normalize() function only works for floating point vectors. The attempt to // use it for an integral vector results in a compile time error. // // \n \subsection vector_operations_swap swap() // // Via the \c swap() function it is possible to completely swap the contents of two vectors of // the same type: \code blaze::DynamicVector<int,columnVector> v1( 10UL ); blaze::DynamicVector<int,columnVector> v2( 20UL ); swap( v1, v2 ); // Swapping the contents of v1 and v2 \endcode // \n Previous: \ref vector_types &nbsp; &nbsp; Next: \ref matrices */ //************************************************************************************************* //**Matrices*************************************************************************************** /*!\page matrices Matrices // // \tableofcontents // // // \n \section matrices_general General Concepts // <hr> // // The \b Blaze library currently offers four dense matrix types (\ref matrix_types_static_matrix, // \ref matrix_types_dynamic_matrix, \ref matrix_types_hybrid_matrix, and \ref matrix_types_custom_matrix) // and one sparse matrix type (\ref matrix_types_compressed_matrix). All matrices can either be // stored as row-major matrices or column-major matrices: \code using blaze::DynamicMatrix; using blaze::rowMajor; using blaze::columnMajor; // Setup of the 2x3 row-major dense matrix // // ( 1 2 3 ) // ( 4 5 6 ) // DynamicMatrix<int,rowMajor> A{ { 1, 2, 3 }, { 4, 5, 6 } }; // Setup of the 3x2 column-major dense matrix // // ( 1 4 ) // ( 2 5 ) // ( 3 6 ) // DynamicMatrix<int,columnMajor> B{ { 1, 4 }, { 2, 5 }, { 3, 6 } }; \endcode // Per default, all matrices in \b Blaze are row-major matrices: \code // Instantiation of a 3x3 row-major matrix blaze::DynamicMatrix<int> C( 3UL, 3UL ); \endcode // \n \section matrices_details Matrix Details // <hr> // // - \ref matrix_types // - \ref matrix_operations // // // \n \section matrices_examples Examples // <hr> \code using blaze::StaticMatrix; using blaze::DynamicMatrix; using blaze::CompressedMatrix; using blaze::rowMajor; using blaze::columnMajor; StaticMatrix<double,6UL,20UL> A; // Instantiation of a 6x20 row-major static matrix CompressedMatrix<double,rowMajor> B; // Instantiation of a row-major compressed matrix DynamicMatrix<double,columnMajor> C; // Instantiation of a column-major dynamic matrix // ... Resizing and initialization C = A * B; \endcode // \n Previous: \ref vector_operations &nbsp; &nbsp; Next: \ref matrix_types */ //************************************************************************************************* //**Matrix Types*********************************************************************************** /*!\page matrix_types Matrix Types // // \tableofcontents // // // \n \section matrix_types_static_matrix StaticMatrix // <hr> // // The blaze::StaticMatrix class template is the representation of a fixed size matrix with // statically allocated elements of arbitrary type. 
It can be included via the header file \code #include <blaze/math/StaticMatrix.h> \endcode // The type of the elements, the number of rows and columns, and the storage order of the matrix // can be specified via the four template parameters: \code template< typename Type, size_t M, size_t N, bool SO > class StaticMatrix; \endcode // - \c Type: specifies the type of the matrix elements. StaticMatrix can be used with any // non-cv-qualified, non-reference element type. // - \c M : specifies the total number of rows of the matrix. // - \c N : specifies the total number of columns of the matrix. Note that it is expected // that StaticMatrix is only used for tiny and small matrices. // - \c SO : specifies the storage order (blaze::rowMajor, blaze::columnMajor) of the matrix. // The default value is blaze::rowMajor. // // The blaze::StaticMatrix is perfectly suited for small to medium matrices whose dimensions are // known at compile time: \code // Definition of a 3x4 integral row-major matrix blaze::StaticMatrix<int,3UL,4UL> A; // Definition of a 4x6 single precision row-major matrix blaze::StaticMatrix<float,4UL,6UL,blaze::rowMajor> B; // Definition of a 6x4 double precision column-major matrix blaze::StaticMatrix<double,6UL,4UL,blaze::columnMajor> C; \endcode // \n \section matrix_types_dynamic_matrix DynamicMatrix // <hr> // // The blaze::DynamicMatrix class template is the representation of an arbitrary sized matrix // with \f$ M \cdot N \f$ dynamically allocated elements of arbitrary type. It can be included // via the header file \code #include <blaze/math/DynamicMatrix.h> \endcode // The type of the elements and the storage order of the matrix can be specified via the two // template parameters: \code template< typename Type, bool SO > class DynamicMatrix; \endcode // - \c Type: specifies the type of the matrix elements. DynamicMatrix can be used with any // non-cv-qualified, non-reference element type. // - \c SO : specifies the storage order (blaze::rowMajor, blaze::columnMajor) of the matrix. // The default value is blaze::rowMajor. // // The blaze::DynamicMatrix is the default choice for all kinds of dense matrices and the best // choice for medium to large matrices. The number of rows and columns can be modified at runtime: \code // Definition of a 3x4 integral row-major matrix blaze::DynamicMatrix<int> A( 3UL, 4UL ); // Definition of a 4x6 single precision row-major matrix blaze::DynamicMatrix<float,blaze::rowMajor> B( 4UL, 6UL ); // Definition of a double precision column-major matrix with 0 rows and columns blaze::DynamicMatrix<double,blaze::columnMajor> C; \endcode // \n \section matrix_types_hybrid_matrix HybridMatrix // <hr> // // The HybridMatrix class template combines the flexibility of a dynamically sized matrix with // the efficiency and performance of a fixed size matrix. It is implemented as a crossing between // the blaze::StaticMatrix and the blaze::DynamicMatrix class templates: Similar to the static // matrix it uses static stack memory instead of dynamically allocated memory and similar to the // dynamic matrix it can be resized (within the extent of the static memory). It can be included // via the header file \code #include <blaze/math/HybridMatrix.h> \endcode // The type of the elements, the maximum number of rows and columns, and the storage order of the // matrix can be specified via the four template parameters: \code template< typename Type, size_t M, size_t N, bool SO > class HybridMatrix; \endcode // - Type: specifies the type of the matrix elements.
HybridMatrix can be used with any // non-cv-qualified, non-reference, non-pointer element type. // - M : specifies the maximum number of rows of the matrix. // - N : specifies the maximum number of columns of the matrix. Note that it is expected // that HybridMatrix is only used for tiny and small matrices. // - SO : specifies the storage order (blaze::rowMajor, blaze::columnMajor) of the matrix. // The default value is blaze::rowMajor. // // The blaze::HybridMatrix is a suitable choice for small to medium matrices, whose dimensions // are not known at compile time or not fixed at runtime, but whose maximum dimensions are known // at compile time: \code // Definition of a 3x4 integral row-major matrix with maximum dimensions of 6x8 blaze::HybridMatrix<int,6UL,8UL> A( 3UL, 4UL ); // Definition of a 4x6 single precision row-major matrix with maximum dimensions of 12x16 blaze::HybridMatrix<float,12UL,16UL,blaze::rowMajor> B( 4UL, 6UL ); // Definition of a 0x0 double precision column-major matrix with maximum dimensions of 6x6 blaze::HybridMatrix<double,6UL,6UL,blaze::columnMajor> C; \endcode // \n \section matrix_types_custom_matrix CustomMatrix // <hr> // // The blaze::CustomMatrix class template provides the functionality to represent an external // array of elements of arbitrary type and a fixed size as a native \b Blaze dense matrix data // structure. Thus in contrast to all other dense matrix types a custom matrix does not perform // any kind of memory allocation by itself, but it is provided with an existing array of elements // during construction. A custom matrix can therefore be considered an alias to the existing // array. It can be included via the header file \code #include <blaze/math/CustomMatrix.h> \endcode // The type of the elements, the properties of the given array of elements and the storage order // of the matrix can be specified via the following four template parameters: \code template< typename Type, bool AF, bool PF, bool SO > class CustomMatrix; \endcode // - Type: specifies the type of the matrix elements. blaze::CustomMatrix can be used with // any non-cv-qualified, non-reference, non-pointer element type. // - AF : specifies whether the represented, external arrays are properly aligned with // respect to the available instruction set (SSE, AVX, ...) or not. // - PF : specifies whether the represented, external arrays are properly padded with // respect to the available instruction set (SSE, AVX, ...) or not. // - SO : specifies the storage order (blaze::rowMajor, blaze::columnMajor) of the matrix. // The default value is blaze::rowMajor.
// // The blaze::CustomMatrix is the right choice if any external array needs to be represented as // a \b Blaze dense matrix data structure or if a custom memory allocation strategy needs to be // realized: \code using blaze::CustomMatrix; using blaze::aligned; using blaze::unaligned; using blaze::padded; using blaze::unpadded; using blaze::rowMajor; using blaze::columnMajor; // Definition of an unmanaged 3x4 custom matrix for unaligned, unpadded integer arrays typedef CustomMatrix<int,unaligned,unpadded,rowMajor> UnalignedUnpadded; std::vector<int> vec( 12UL ); UnalignedUnpadded A( &vec[0], 3UL, 4UL ); // Definition of a managed 5x6 custom matrix for unaligned but padded 'float' arrays typedef CustomMatrix<float,unaligned,padded,columnMajor> UnalignedPadded; UnalignedPadded B( new float[40], 5UL, 6UL, 8UL, blaze::ArrayDelete() ); // Definition of a managed 12x13 custom matrix for aligned, unpadded 'double' arrays typedef CustomMatrix<double,aligned,unpadded,rowMajor> AlignedUnpadded; AlignedUnpadded C( blaze::allocate<double>( 192UL ), 12UL, 13UL, 16UL, blaze::Deallocate() ); // Definition of a 7x14 custom matrix for aligned, padded 'complex<double>' arrays typedef CustomMatrix<complex<double>,aligned,padded,columnMajor> AlignedPadded; AlignedPadded D( blaze::allocate< complex<double> >( 112UL ), 7UL, 14UL, 16UL, blaze::Deallocate() ); \endcode // In comparison with the remaining \b Blaze dense matrix types blaze::CustomMatrix has several // special characteristics. All of these result from the fact that a custom matrix is not // performing any kind of memory allocation, but instead is given an existing array of elements. // The following sections discuss all of these characteristics: // // -# <b>\ref matrix_types_custom_matrix_memory_management</b> // -# <b>\ref matrix_types_custom_matrix_copy_operations</b> // -# <b>\ref matrix_types_custom_matrix_alignment</b> // -# <b>\ref matrix_types_custom_matrix_padding</b> // // \n \subsection matrix_types_custom_matrix_memory_management Memory Management // // The blaze::CustomMatrix class template acts as an adaptor for an existing array of elements. As // such it provides everything that is required to use the array just like a native \b Blaze dense // matrix data structure. However, this flexibility comes with the price that the user of a custom // matrix is responsible for the resource management. // // When constructing a custom matrix there are two choices: Either a user manually manages the // array of elements outside the custom matrix, or alternatively passes the responsibility for // the memory management to an instance of CustomMatrix. In the second case the CustomMatrix // class employs shared ownership between all copies of the custom matrix, which reference the // same array. // // The following examples give an impression of several possible types of custom matrices: \code using blaze::CustomMatrix; using blaze::ArrayDelete; using blaze::Deallocate; using blaze::allocate; using blaze::aligned; using blaze::unaligned; using blaze::padded; using blaze::unpadded; using blaze::rowMajor; using blaze::columnMajor; // Definition of a 3x4 custom row-major matrix with unaligned, unpadded and externally // managed integer array. Note that the std::vector must be guaranteed to outlive the // custom matrix! std::vector<int> vec( 12UL ); CustomMatrix<int,unaligned,unpadded> A( &vec[0], 3UL, 4UL ); // Definition of a 3x4 custom row-major matrix for unaligned, unpadded integer arrays.
// The responsibility for the memory management is passed to the custom matrix by // providing a deleter of type 'blaze::ArrayDelete' that is used during the destruction // of the custom matrix. CustomMatrix<int,unaligned,unpadded,rowMajor> B( new int[12], 3UL, 4UL, ArrayDelete() ); // Definition of a custom 8x12 matrix for an aligned and padded integer array of // capacity 128 (including 8 padding elements per row). The memory management is passed // to the custom matrix by providing a deleter of type 'blaze::Deallocate'. CustomMatrix<int,aligned,padded> C( allocate<int>( 128UL ), 8UL, 12UL, 16UL, Deallocate() ); \endcode // It is possible to pass any type of deleter to the constructor. The deleter is only required // to provide a function call operator that can be passed the pointer to the managed array. As // an example the following code snippet shows the implementation of two native \b Blaze deleters // blaze::ArrayDelete and blaze::Deallocate: \code namespace blaze { struct ArrayDelete { template< typename Type > inline void operator()( Type ptr ) const { boost::checked_array_delete( ptr ); } }; struct Deallocate { template< typename Type > inline void operator()( Type ptr ) const { deallocate( ptr ); } }; } // namespace blaze \endcode // \n \subsection matrix_types_custom_matrix_copy_operations Copy Operations // // As with all dense matrices it is possible to copy construct a custom matrix: \code using blaze::CustomMatrix; using blaze::unaligned; using blaze::unpadded; typedef CustomMatrix<int,unaligned,unpadded> CustomType; std::vector<int> vec( 6UL, 10 ); // Vector of 6 integers of the value 10 CustomType A( &vec[0], 2UL, 3UL ); // Represent the std::vector as Blaze dense matrix A(0,1) = 20; // Also modifies the std::vector CustomType B( A ); // Creating a copy of matrix A B(0,2) = 20; // Also affects matrix A and the std::vector \endcode // It is important to note that a custom matrix acts as a reference to the specified array. Thus // the result of the copy constructor is a new custom matrix that is referencing and representing // the same array as the original custom matrix. In case a deleter has been provided to the first // custom matrix, both matrices share the responsibility to destroy the array when the last matrix // goes out of scope. // // In contrast to copy construction, just as with references, copy assignment does not change // which array is referenced by the custom matrices, but modifies the values of the array: \code std::vector<int> vec2( 6UL, 4 ); // Vector of 6 integers of the value 4 CustomType C( &vec2[0], 2UL, 3UL ); // Represent the std::vector as Blaze dense matrix A = C; // Copy assignment: Set all values of matrix A and B to 4. \endcode // \n \subsection matrix_types_custom_matrix_alignment Alignment // // In case the custom matrix is specified as \c aligned the passed array must adhere to some // alignment restrictions based on the alignment requirements of the used data type and the // used instruction set (SSE, AVX, ...). The restriction applies to the first element of each // row/column: In case of a row-major matrix the first element of each row must be properly // aligned, in case of a column-major matrix the first element of each column must be properly // aligned.
For instance, if a row-major matrix is used and AVX is active the first element of // each row must be 32-byte aligned: \code using blaze::CustomMatrix; using blaze::Deallocate; using blaze::aligned; using blaze::padded; using blaze::rowMajor; int* array = blaze::allocate<int>( 40UL ); // Is guaranteed to be 32-byte aligned CustomMatrix<int,aligned,padded,rowMajor> A( array, 5UL, 6UL, 8UL, Deallocate() ); \endcode // In the example, the row-major matrix has six columns. However, since with AVX eight integer // values are loaded together the matrix is padded with two additional elements. This guarantees // that the first element of each row is 32-byte aligned. In case the alignment requirements are // violated, a \c std::invalid_argument exception is thrown. // // \n \subsection matrix_types_custom_matrix_padding Padding // // Adding padding elements to the end of each row/column can have a significant impact on the // performance. For instance, assuming that AVX is available, then two aligned, padded, 3x3 double // precision matrices can be added via three SIMD addition operations: \code using blaze::CustomMatrix; using blaze::Deallocate; using blaze::allocate; using blaze::aligned; using blaze::padded; typedef CustomMatrix<double,aligned,padded> CustomType; // Creating padded custom 3x3 matrix with an additional padding element in each row CustomType A( allocate<double>( 12UL ), 3UL, 3UL, 4UL, Deallocate() ); CustomType B( allocate<double>( 12UL ), 3UL, 3UL, 4UL, Deallocate() ); CustomType C( allocate<double>( 12UL ), 3UL, 3UL, 4UL, Deallocate() ); // ... Initialization C = A + B; // AVX-based matrix addition \endcode // In this example, maximum performance is possible. However, in case no padding elements are // inserted a scalar addition has to be used: \code using blaze::CustomMatrix; using blaze::Deallocate; using blaze::allocate; using blaze::aligned; using blaze::unpadded; typedef CustomMatrix<double,aligned,unpadded> CustomType; // Creating unpadded custom 3x3 matrix CustomType A( allocate<double>( 12UL ), 3UL, 3UL, 4UL, Deallocate() ); CustomType B( allocate<double>( 12UL ), 3UL, 3UL, 4UL, Deallocate() ); CustomType C( allocate<double>( 12UL ), 3UL, 3UL, 4UL, Deallocate() ); // ... Initialization C = A + B; // Scalar matrix addition \endcode // Note that the construction of padded and unpadded aligned matrices looks identical. However, // in case of padded matrices, \b Blaze will zero initialize the padding elements and use them // in all computations in order to achieve maximum performance. In case of an unpadded matrix // \b Blaze will ignore the elements with the downside that it is not possible to load a complete // row to an AVX register, which makes it necessary to fall back to a scalar addition. // // The number of padding elements is required to be sufficient with respect to the available // instruction set: In case of an aligned padded custom matrix the added padding elements must // guarantee that the total number of elements in each row/column is a multiple of the SIMD // vector width. In case of an unaligned padded matrix the number of padding elements can be // greater than or equal to the number of padding elements of an aligned padded custom matrix. In case // the padding is insufficient with respect to the available instruction set, a // \c std::invalid_argument exception is thrown.
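// As a small sketch of this rule, the required row spacing of an aligned, padded row-major // matrix can be computed by rounding the number of columns up to the next multiple of the SIMD // vector width. The width of 4 doubles per register (corresponding to AVX) and the helper // function are assumptions of this example: \code const size_t simdWidth( 4UL ); // Assumed SIMD width: 4 doubles per AVX register // Hypothetical helper: rounds the number of columns up to the next multiple of the SIMD width size_t paddedSpacing( size_t columns ) { return ( ( columns + simdWidth - 1UL ) / simdWidth ) * simdWidth; } // For a 3x3 double precision matrix this yields a spacing of 4 and thus a total allocation of // 3UL * paddedSpacing( 3UL ) == 12UL elements, matching the 'allocate<double>( 12UL )' calls // in the padding example above. \endcode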
// // // \n \section matrix_types_compressed_matrix CompressedMatrix // <hr> // // The blaze::CompressedMatrix class template is the representation of an arbitrary sized sparse // matrix with \f$ M \cdot N \f$ dynamically allocated elements of arbitrary type. It can be // included via the header file \code #include <blaze/math/CompressedMatrix.h> \endcode // The type of the elements and the storage order of the matrix can be specified via the two // template parameters: \code template< typename Type, bool SO > class CompressedMatrix; \endcode // - \c Type: specifies the type of the matrix elements. CompressedMatrix can be used with // any non-cv-qualified, non-reference, non-pointer element type. // - \c SO : specifies the storage order (blaze::rowMajor, blaze::columnMajor) of the matrix. // The default value is blaze::rowMajor. // // The blaze::CompressedMatrix is the right choice for all kinds of sparse matrices: \code // Definition of a 3x4 integral row-major matrix blaze::CompressedMatrix<int> A( 3UL, 4UL ); // Definition of a 4x6 single precision row-major matrix blaze::CompressedMatrix<float,blaze::rowMajor> B( 4UL, 6UL ); // Definition of a double precision column-major matrix with 0 rows and columns blaze::CompressedMatrix<double,blaze::columnMajor> C; \endcode // \n Previous: \ref matrices &nbsp; &nbsp; Next: \ref matrix_operations */ //************************************************************************************************* //**Matrix Operations****************************************************************************** /*!\page matrix_operations Matrix Operations // // \tableofcontents // // // \n \section matrix_operations_constructors Constructors // <hr> // // Matrices are just as easy and intuitive to create as vectors. Still, there are a few rules // to be aware of: // - In case the last template parameter (the storage order) is omitted, the matrix is per // default stored in row-major order. // - The elements of a \c StaticMatrix or \c HybridMatrix are default initialized (i.e. built-in // data types are initialized to 0, class types are initialized via the default constructor). // - Newly allocated elements of a \c DynamicMatrix or \c CompressedMatrix remain uninitialized // if they are of built-in type and are default constructed if they are of class type. // // \n \subsection matrix_operations_default_construction Default Construction \code using blaze::StaticMatrix; using blaze::DynamicMatrix; using blaze::CompressedMatrix; // All matrices can be default constructed. Whereas the size of // a StaticMatrix is fixed via the second and third template // parameter, the initial size of a constructed DynamicMatrix // or CompressedMatrix is 0. StaticMatrix<int,2UL,2UL> M1; // Instantiation of a 2x2 integer row-major // matrix. All elements are initialized to 0. DynamicMatrix<float> M2; // Instantiation of a single precision dynamic // row-major matrix with 0 rows and 0 columns. DynamicMatrix<double,columnMajor> M3; // Instantiation of a double precision dynamic // column-major matrix with 0 rows and 0 columns. CompressedMatrix<int> M4; // Instantiation of a compressed integer // row-major matrix of size 0x0. CompressedMatrix<double,columnMajor> M5; // Instantiation of a compressed double precision // column-major matrix of size 0x0. 
\endcode // \n \subsection matrix_operations_size_construction Construction with Specific Size // // The \c DynamicMatrix, \c HybridMatrix, and \c CompressedMatrix classes offer a constructor // that allows one to immediately give the matrices a specific number of rows and columns: \code DynamicMatrix<int> M6( 5UL, 4UL ); // Instantiation of a 5x4 dynamic row-major // matrix. The elements are not initialized. HybridMatrix<double,5UL,9UL> M7( 3UL, 7UL ); // Instantiation of a 3x7 hybrid row-major // matrix. The elements are not initialized. CompressedMatrix<float,columnMajor> M8( 8UL, 6UL ); // Instantiation of an empty 8x6 compressed // column-major matrix. \endcode // Note that dense matrices (in this case \c DynamicMatrix and \c HybridMatrix) immediately // allocate enough capacity for all matrix elements. Sparse matrices on the other hand (in this // example \c CompressedMatrix) merely acquire the size, but don't necessarily allocate memory. // // // \n \subsection matrix_operations_initialization_constructors Initialization Constructors // // All dense matrix classes offer a constructor for a direct, homogeneous initialization of all // matrix elements. In contrast, for sparse matrices the predicted number of non-zero elements // can be specified. \code StaticMatrix<int,4UL,3UL,columnMajor> M9( 7 ); // Instantiation of a 4x3 integer column-major // matrix. All elements are initialized to 7. DynamicMatrix<float> M10( 2UL, 5UL, 2.0F ); // Instantiation of a 2x5 single precision row-major // matrix. All elements are initialized to 2.0F. CompressedMatrix<int> M11( 3UL, 4UL, 4 ); // Instantiation of a 3x4 integer row-major // matrix with capacity for 4 non-zero elements. \endcode // \n \subsection matrix_operations_array_construction Array Construction // // Alternatively, all dense matrix classes offer a constructor for an initialization with a // dynamic or static array. If the matrix is initialized from a dynamic array, the constructor // expects the dimensions of the values provided by the array as first and second argument, and // the array itself as third argument. In case of a static array, the fixed size of the array is used: \code const std::unique_ptr<double[]> array1( new double[6] ); // ... Initialization of the dynamic array blaze::StaticMatrix<double,2UL,3UL> M12( 2UL, 3UL, array1.get() ); int array2[2][2] = { { 4, -5 }, { -6, 7 } }; blaze::StaticMatrix<int,2UL,2UL,rowMajor> M13( array2 ); \endcode // \n \subsection matrix_operations_initializer_list_construction Initializer List Construction // // In addition, all dense matrix classes can be directly initialized by means of an initializer // list: \code blaze::DynamicMatrix<float,columnMajor> M14{ { 3.1F, 6.4F }, { -0.9F, -1.2F }, { 4.8F, 0.6F } }; \endcode // \n \subsection matrix_operations_copy_construction Copy Construction // // All dense and sparse matrices can be created as a copy of another dense or sparse matrix. \code StaticMatrix<int,5UL,4UL,rowMajor> M15( M6 ); // Instantiation of the dense row-major matrix M15 // as copy of the dense row-major matrix M6. DynamicMatrix<float,columnMajor> M16( M8 ); // Instantiation of the dense column-major matrix M16 // as copy of the sparse column-major matrix M8. CompressedMatrix<double,columnMajor> M17( M7 ); // Instantiation of the compressed column-major matrix // M17 as copy of the dense row-major matrix M7. CompressedMatrix<float,rowMajor> M18( M8 ); // Instantiation of the compressed row-major matrix // M18 as copy of the compressed column-major matrix M8.
\endcode // Note that it is not possible to create a \c StaticMatrix as a copy of a matrix with a different // number of rows and/or columns: \code StaticMatrix<int,4UL,5UL,rowMajor> M19( M6 ); // Runtime error: Number of rows and columns // does not match! StaticMatrix<int,4UL,4UL,columnMajor> M20( M9 ); // Compile time error: Number of columns does // not match! \endcode // \n \section matrix_operations_assignment Assignment // <hr> // // There are several types of assignment to dense and sparse matrices: // \ref matrix_operations_homogeneous_assignment, \ref matrix_operations_array_assignment, // \ref matrix_operations_initializer_list_assignment, \ref matrix_operations_copy_assignment, // and \ref matrix_operations_compound_assignment. // // // \n \subsection matrix_operations_homogeneous_assignment Homogeneous Assignment // // It is possible to assign the same value to all elements of a dense matrix. All dense matrix // classes provide an according assignment operator: \code blaze::StaticMatrix<int,3UL,2UL> M1; blaze::DynamicMatrix<double> M2; // Setting all integer elements of the StaticMatrix to 4 M1 = 4; // Setting all double precision elements of the DynamicMatrix to 3.5 M2 = 3.5; \endcode // \n \subsection matrix_operations_array_assignment Array Assignment // // Dense matrices can also be assigned a static array: \code blaze::StaticMatrix<int,2UL,2UL,rowMajor> M1; blaze::StaticMatrix<int,2UL,2UL,columnMajor> M2; blaze::DynamicMatrix<double> M3; int array1[2][2] = { { 1, 2 }, { 3, 4 } }; double array2[3][2] = { { 3.1, 6.4 }, { -0.9, -1.2 }, { 4.8, 0.6 } }; M1 = array1; M2 = array1; M3 = array2; \endcode // Note that the dimensions of the static array have to match the size of a \c StaticMatrix, // whereas a \c DynamicMatrix is resized according to the array dimensions: \f$ M3 = \left(\begin{array}{*{2}{c}} 3.1 & 6.4 \\ -0.9 & -1.2 \\ 4.8 & 0.6 \\ \end{array}\right)\f$ // \n \subsection matrix_operations_initializer_list_assignment Initializer List Assignment // // Alternatively, it is possible to directly assign an initializer list to a dense matrix: \code blaze::DynamicMatrix<double> M; M = { { 3.1, 6.4 }, { -0.9, -1.2 }, { 4.8, 0.6 } }; \endcode // \n \subsection matrix_operations_copy_assignment Copy Assignment // // All kinds of matrices can be assigned to each other. The only restriction is that since a // \c StaticMatrix cannot change its size, the assigned matrix must match both in the number of // rows and in the number of columns. \code blaze::StaticMatrix<int,3UL,2UL,rowMajor> M1; blaze::DynamicMatrix<int,rowMajor> M2( 3UL, 2UL ); blaze::DynamicMatrix<float,rowMajor> M3( 5UL, 2UL ); blaze::CompressedMatrix<int,rowMajor> M4( 3UL, 2UL ); blaze::CompressedMatrix<float,columnMajor> M5( 3UL, 2UL ); // ... Initialization of the matrices M1 = M2; // OK: Assignment of a 3x2 dense row-major matrix to another 3x2 dense row-major matrix M1 = M4; // OK: Assignment of a 3x2 sparse row-major matrix to a 3x2 dense row-major matrix M1 = M3; // Runtime error: Cannot assign a 5x2 matrix to a 3x2 static matrix M1 = M5; // OK: Assignment of a 3x2 sparse column-major matrix to a 3x2 dense row-major matrix \endcode // \n \subsection matrix_operations_compound_assignment Compound Assignment // // Compound assignment is also available for matrices: addition assignment, subtraction assignment, // and multiplication assignment. In contrast to plain assignment, however, the number of rows // and columns of the two operands have to match according to the arithmetic operation.
\code blaze::StaticMatrix<int,2UL,3UL,rowMajor> M1; blaze::DynamicMatrix<int,rowMajor> M2( 2UL, 3UL ); blaze::CompressedMatrix<float,columnMajor> M3( 2UL, 3UL ); blaze::CompressedMatrix<float,rowMajor> M4( 2UL, 4UL ); blaze::StaticMatrix<float,2UL,4UL,rowMajor> M5; blaze::CompressedMatrix<float,rowMajor> M6( 3UL, 2UL ); // ... Initialization of the matrices M1 += M2; // OK: Addition assignment between two row-major matrices of the same dimensions M1 -= M3; // OK: Subtraction assignment between a row-major and a column-major matrix M1 += M4; // Runtime error: No compound assignment between matrices of different size M1 -= M5; // Compilation error: No compound assignment between matrices of different size M2 *= M6; // OK: Multiplication assignment between two row-major matrices \endcode // Note that the multiplication assignment potentially changes the number of columns of the // target matrix: \f$\left(\begin{array}{*{3}{c}} 2 & 0 & 1 \\ 0 & 3 & 2 \\ \end{array}\right) \times \left(\begin{array}{*{2}{c}} 4 & 0 \\ 1 & 0 \\ 0 & 3 \\ \end{array}\right) = \left(\begin{array}{*{2}{c}} 8 & 3 \\ 3 & 6 \\ \end{array}\right)\f$ // Since a \c StaticMatrix cannot change its size, only a square StaticMatrix can be used in a // multiplication assignment with other square matrices of the same dimensions. // // // \n \section matrix_operations_element_access Element Access // <hr> // // The easiest way to access a specific dense or sparse matrix element is via the function call // operator. The indices to access a matrix are zero-based: \code blaze::DynamicMatrix<int> M1( 4UL, 6UL ); M1(0,0) = 1; M1(0,1) = 3; // ... blaze::CompressedMatrix<double> M2( 5UL, 3UL ); M2(0,2) = 4.1; M2(1,1) = -6.3; \endcode // Since dense matrices allocate enough memory for all contained elements, using the function // call operator on a dense matrix directly returns a reference to the accessed value. In case // of a sparse matrix, if the accessed value is currently not contained in the matrix, the // value is inserted into the matrix prior to returning a reference to the value, which can // be much more expensive than the direct access to a dense matrix. Consider the following // example: \code blaze::CompressedMatrix<int> M1( 4UL, 4UL ); for( size_t i=0UL; i<M1.rows(); ++i ) { for( size_t j=0UL; j<M1.columns(); ++j ) { ... = M1(i,j); } } \endcode // Although the compressed matrix is only used for read access within the for loop, using the // function call operator temporarily inserts 16 non-zero elements into the matrix. Therefore, // all matrices (sparse as well as dense) offer an alternate way via the \c begin(), \c cbegin(), // \c end() and \c cend() functions to traverse all contained elements by iterator. Note that // it is not possible to traverse all elements of the matrix at once, but only to traverse the // elements in a row/column-wise fashion. In case of a non-const matrix, \c begin() and // \c end() return an \c Iterator, which allows a manipulation of the non-zero value; in case of // a constant matrix or in case \c cbegin() or \c cend() are used, a \c ConstIterator is returned: \code using blaze::CompressedMatrix; CompressedMatrix<int,rowMajor> A( 4UL, 6UL ); // Traversing the matrix by Iterator for( size_t i=0UL; i<A.rows(); ++i ) { for( CompressedMatrix<int,rowMajor>::Iterator it=A.begin(i); it!=A.end(i); ++it ) { it->value() = ...; // OK: Write access to the value of the non-zero element. ... = it->value(); // OK: Read access to the value of the non-zero element.
it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the non-zero element. } } // Traversing the matrix by ConstIterator for( size_t i=0UL; i<A.rows(); ++i ) { for( CompressedMatrix<int,rowMajor>::ConstIterator it=A.cbegin(i); it!=A.cend(i); ++it ) { it->value() = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the non-zero element. } } \endcode // Note that \c begin(), \c cbegin(), \c end(), and \c cend() are also available as free functions: \code for( size_t i=0UL; i<A.rows(); ++i ) { for( CompressedMatrix<int,rowMajor>::Iterator it=begin( A, i ); it!=end( A, i ); ++it ) { // ... } } for( size_t i=0UL; i<A.rows(); ++i ) { for( CompressedMatrix<int,rowMajor>::ConstIterator it=cbegin( A, i ); it!=cend( A, i ); ++it ) { // ... } } \endcode // \n \section matrix_operations_element_insertion Element Insertion // <hr> // // Whereas a dense matrix always provides enough capacity to store all matrix elements, a sparse // matrix only stores the non-zero elements. Therefore it is necessary to explicitly add elements // to the matrix. The first possibility to add elements to a sparse matrix is the function call // operator: \code using blaze::CompressedMatrix; CompressedMatrix<int> M1( 3UL, 4UL ); M1(1,2) = 9; \endcode // In case the element at the given position is not yet contained in the sparse matrix, it is // automatically inserted. Otherwise the old value is replaced by the new value 9. The operator // returns a reference to the sparse matrix element.\n // An alternative is the \c set() function: In case the element is not yet contained in the matrix // the element is inserted, else the element's value is modified: \code // Insert or modify the value at position (2,0) M1.set( 2, 0, 1 ); \endcode // However, insertion of elements can be better controlled via the \c insert() function. In // contrast to the function call operator and the \c set() function it emits an exception in case // the element is already contained in the matrix. In order to check for this case, the \c find() // function can be used: \code // In case the element at position (2,3) is not yet contained in the matrix it is inserted // with a value of 4. if( M1.find( 2, 3 ) == M1.end( 2 ) ) M1.insert( 2, 3, 4 ); \endcode // Although the \c insert() function is very flexible, due to performance reasons it is not // suited for the setup of large sparse matrices. A very efficient, yet also very low-level // way to fill a sparse matrix is the \c append() function. It requires the sparse matrix to // provide enough capacity to insert a new element in the specified row. Additionally, the // index of the new element must be larger than the index of the previous element in the same // row. Violating these conditions results in undefined behavior! \code M1.reserve( 0, 3 ); // Reserving space for three non-zero elements in row 0 M1.append( 0, 1, 2 ); // Appending the element 2 in row 0 at column index 1 M1.append( 0, 2, -4 ); // Appending the element -4 in row 0 at column index 2 // ...
\endcode // The most efficient way to fill a sparse matrix with elements, however, is a combination of // \c reserve(), \c append(), and the \c finalize() function: \code blaze::CompressedMatrix<int> M1( 3UL, 5UL ); M1.reserve( 3 ); // Reserving enough space for 3 non-zero elements M1.append( 0, 1, 1 ); // Appending the value 1 in row 0 with column index 1 M1.finalize( 0 ); // Finalizing row 0 M1.append( 1, 1, 2 ); // Appending the value 2 in row 1 with column index 1 M1.finalize( 1 ); // Finalizing row 1 M1.append( 2, 0, 3 ); // Appending the value 3 in row 2 with column index 0 M1.finalize( 2 ); // Finalizing row 2 \endcode // \n \section matrix_operations_member_functions Member Functions // <hr> // // \subsection matrix_operations_rows .rows() // // The current number of rows of a matrix can be acquired via the \c rows() member function: \code // Instantiating a dynamic matrix with 10 rows and 8 columns blaze::DynamicMatrix<int> M1( 10UL, 8UL ); M1.rows(); // Returns 10 // Instantiating a compressed matrix with 8 rows and 12 columns blaze::CompressedMatrix<double> M2( 8UL, 12UL ); M2.rows(); // Returns 8 \endcode // Alternatively, the free function \c rows() can be used to query the current number of rows of // a matrix. In contrast to the member function, the free function can also be used to query the // number of rows of a matrix expression: \code rows( M1 ); // Returns 10, i.e. has the same effect as the member function rows( M2 ); // Returns 8, i.e. has the same effect as the member function rows( M1 * M2 ); // Returns 10, i.e. the number of rows of the resulting matrix \endcode // \n \subsection matrix_operations_columns .columns() // // The current number of columns of a matrix can be acquired via the \c columns() member function: \code // Instantiating a dynamic matrix with 6 rows and 8 columns blaze::DynamicMatrix<int> M1( 6UL, 8UL ); M1.columns(); // Returns 8 // Instantiating a compressed matrix with 8 rows and 7 columns blaze::CompressedMatrix<double> M2( 8UL, 7UL ); M2.columns(); // Returns 7 \endcode // There is also a free function \c columns() available, which can also be used to query the number // of columns of a matrix expression: \code columns( M1 ); // Returns 8, i.e. has the same effect as the member function columns( M2 ); // Returns 7, i.e. has the same effect as the member function columns( M1 * M2 ); // Returns 7, i.e. the number of columns of the resulting matrix \endcode // \n \subsection matrix_operations_capacity .capacity() // // The \c capacity() member function returns the internal capacity of a dense or sparse matrix. // Note that the capacity of a matrix doesn't have to be equal to the size of a matrix. In case of // a dense matrix the capacity will always be greater than or equal to the total number of elements // of the matrix. In case of a sparse matrix, the capacity will usually be much less than the // total number of elements. \code blaze::DynamicMatrix<float> M1( 5UL, 7UL ); blaze::StaticMatrix<float,7UL,4UL> M2; M1.capacity(); // Returns at least 35 M2.capacity(); // Returns at least 28 \endcode // There is also a free function \c capacity() available to query the capacity. However, please // note that this function cannot be used to query the capacity of a matrix expression: \code capacity( M1 ); // Returns at least 35, i.e. has the same effect as the member function capacity( M2 ); // Returns at least 28, i.e. has the same effect as the member function capacity( M1 * M2 ); // Compilation error!
\endcode // \n \subsection matrix_operations_nonzeros .nonZeros() // // For both dense and sparse matrices the current number of non-zero elements can be queried // via the \c nonZeros() member function. In case of matrices there are two flavors of the // \c nonZeros() function: One returns the total number of non-zero elements in the matrix, // the second returns the number of non-zero elements in a specific row (in case of a row-major // matrix) or column (in case of a column-major matrix). Sparse matrices directly return their // number of non-zero elements, while dense matrices traverse their elements and count the number // of non-zero elements. \code blaze::DynamicMatrix<int,rowMajor> M1( 3UL, 5UL ); // ... Initializing the dense matrix M1.nonZeros(); // Returns the total number of non-zero elements in the dense matrix M1.nonZeros( 2 ); // Returns the number of non-zero elements in row 2 \endcode \code blaze::CompressedMatrix<double,columnMajor> M2( 4UL, 7UL ); // ... Initializing the sparse matrix M2.nonZeros(); // Returns the total number of non-zero elements in the sparse matrix M2.nonZeros( 3 ); // Returns the number of non-zero elements in column 3 \endcode // The free \c nonZeros() function can also be used to query the number of non-zero elements in a // matrix expression. However, the result is not the exact number of non-zero elements, but may be // a rough estimation: \code nonZeros( M1 ); // Has the same effect as the member function nonZeros( M1, 2 ); // Has the same effect as the member function nonZeros( M2 ); // Has the same effect as the member function nonZeros( M2, 3 ); // Has the same effect as the member function nonZeros( M1 * M2 ); // Estimates the number of non-zero elements in the matrix expression \endcode // \n \subsection matrix_operations_resize_reserve .resize() / .reserve() // // The dimensions of a \c StaticMatrix are fixed at compile time by the second and third template // parameter and a \c CustomMatrix cannot be resized. In contrast, the number of rows and columns // of \c DynamicMatrix, \c HybridMatrix, and \c CompressedMatrix can be changed at runtime: \code using blaze::DynamicMatrix; using blaze::CompressedMatrix; DynamicMatrix<int,rowMajor> M1; CompressedMatrix<int,columnMajor> M2( 3UL, 2UL ); // Adapting the number of rows and columns via the resize() function. The (optional) // third parameter specifies whether the existing elements should be preserved. M1.resize( 2UL, 2UL ); // Resizing matrix M1 to 2x2 elements. Elements of built-in type // remain uninitialized, elements of class type are default // constructed. M1.resize( 3UL, 1UL, false ); // Resizing M1 to 3x1 elements. The old elements are lost, the // new elements are NOT initialized! M2.resize( 5UL, 7UL, true ); // Resizing M2 to 5x7 elements. The old elements are preserved. M2.resize( 3UL, 2UL, false ); // Resizing M2 to 3x2 elements. The old elements are lost. \endcode // Note that resizing a matrix invalidates all existing views (see e.g. \ref views_submatrices) // on the matrix: \code typedef blaze::DynamicMatrix<int,rowMajor> MatrixType; typedef blaze::Row<MatrixType> RowType; MatrixType M1( 10UL, 20UL ); // Creating a 10x20 matrix RowType row8 = row( M1, 8UL ); // Creating a view on the 8th row of the matrix M1.resize( 6UL, 20UL ); // Resizing the matrix invalidates the view \endcode // When the internal capacity of a matrix is no longer sufficient, the allocation of a larger // chunk of memory is triggered.
In order to avoid frequent reallocations, the \c reserve() // function can be used up front to set the internal capacity: \code blaze::DynamicMatrix<int> M1; M1.reserve( 100 ); M1.rows(); // Returns 0 M1.capacity(); // Returns at least 100 \endcode // Additionally it is possible to reserve memory in a specific row (for a row-major matrix) or // column (for a column-major matrix): \code blaze::CompressedMatrix<int> M1( 4UL, 6UL ); M1.reserve( 1, 4 ); // Reserving enough space for four non-zero elements in row 1 \endcode // \n \section matrix_operations_free_functions Free Functions // <hr> // // \subsection matrix_operations_reset_clear reset() / clear() // // In order to reset all elements of a dense or sparse matrix, the \c reset() function can be // used. The number of rows and columns of the matrix are preserved: \code // Setting up a single precision row-major matrix, whose elements are initialized with 2.0F. blaze::DynamicMatrix<float> M1( 4UL, 5UL, 2.0F ); // Resetting all elements to 0.0F. reset( M1 ); // Resetting all elements M1.rows(); // Returns 4: size and capacity remain unchanged \endcode // Alternatively, only a single row or column of the matrix can be reset: \code blaze::DynamicMatrix<int,blaze::rowMajor> M1( 7UL, 6UL, 5 ); // Setup of a row-major matrix blaze::DynamicMatrix<int,blaze::columnMajor> M2( 4UL, 5UL, 4 ); // Setup of a column-major matrix reset( M1, 2UL ); // Resetting the 2nd row of the row-major matrix reset( M2, 3UL ); // Resetting the 3rd column of the column-major matrix \endcode // In order to reset a row of a column-major matrix or a column of a row-major matrix, use a // row or column view (see \ref views_rows and \ref views_columns). // // In order to return a matrix to its default state (i.e. the state of a default constructed // matrix), the \c clear() function can be used: \code // Setting up a single precision row-major matrix, whose elements are initialized with 2.0F. blaze::DynamicMatrix<float> M1( 4UL, 5UL, 2.0F ); // Clearing the entire matrix. clear( M1 ); // Clearing the matrix M1.rows(); // Returns 0: size is reset, but capacity remains unchanged \endcode // \n \subsection matrix_operations_isnan isnan() // // The \c isnan() function provides the means to check a dense or sparse matrix for not-a-number // elements: \code blaze::DynamicMatrix<double> A( 3UL, 4UL ); // ... Initialization if( isnan( A ) ) { ... } \endcode \code blaze::CompressedMatrix<double> A( 3UL, 4UL ); // ... Initialization if( isnan( A ) ) { ... } \endcode // If at least one element of the matrix is not-a-number, the function returns \c true, otherwise // it returns \c false. Please note that this function only works for matrices with floating point // elements. The attempt to use it for a matrix with a non-floating point element type results in // a compile time error. // // // \n \subsection matrix_operations_isdefault isDefault() // // The \c isDefault() function returns whether the given dense or sparse matrix is in default state: \code blaze::HybridMatrix<int,5UL,4UL> A; // ... Resizing and initialization if( isDefault( A ) ) { ... } \endcode // A matrix is in default state if it appears to just have been default constructed. All resizable // matrices (\c HybridMatrix, \c DynamicMatrix, or \c CompressedMatrix) and \c CustomMatrix are in // default state if their size is equal to zero. A non-resizable matrix (\c StaticMatrix and all // submatrices) is in default state if all its elements are in default state.
For instance, in case // the matrix is instantiated for a built-in integral or floating point data type, the function // returns \c true in case all matrix elements are 0 and \c false in case any matrix element is // not 0. // // // \n \subsection matrix_operations_isSquare isSquare() // // Whether a dense or sparse matrix is a square matrix (i.e. if the number of rows is equal to the // number of columns) can be checked via the \c isSquare() function: \code blaze::DynamicMatrix<double> A; // ... Resizing and initialization if( isSquare( A ) ) { ... } \endcode // \n \subsection matrix_operations_issymmetric isSymmetric() // // Via the \c isSymmetric() function it is possible to check whether a dense or sparse matrix // is symmetric: \code blaze::DynamicMatrix<float> A; // ... Resizing and initialization if( isSymmetric( A ) ) { ... } \endcode // Note that non-square matrices are never considered to be symmetric! // // // \n \subsection matrix_operations_isUniform isUniform() // // In order to check if all matrix elements are identical, the \c isUniform function can be used: \code blaze::DynamicMatrix<int> A; // ... Resizing and initialization if( isUniform( A ) ) { ... } \endcode // Note that in case of a sparse matrix the zero elements are also taken into account! // // // \n \subsection matrix_operations_islower isLower() // // Via the \c isLower() function it is possible to check whether a dense or sparse matrix is // lower triangular: \code blaze::DynamicMatrix<float> A; // ... Resizing and initialization if( isLower( A ) ) { ... } \endcode // Note that non-square matrices are never considered to be lower triangular! // // // \n \subsection matrix_operations_isunilower isUniLower() // // Via the \c isUniLower() function it is possible to check whether a dense or sparse matrix is // lower unitriangular: \code blaze::DynamicMatrix<float> A; // ... Resizing and initialization if( isUniLower( A ) ) { ... } \endcode // Note that non-square matrices are never considered to be lower unitriangular! // // // \n \subsection matrix_operations_isstrictlylower isStrictlyLower() // // Via the \c isStrictlyLower() function it is possible to check whether a dense or sparse matrix // is strictly lower triangular: \code blaze::DynamicMatrix<float> A; // ... Resizing and initialization if( isStrictlyLower( A ) ) { ... } \endcode // Note that non-square matrices are never considered to be strictly lower triangular! // // // \n \subsection matrix_operations_isUpper isUpper() // // Via the \c isUpper() function it is possible to check whether a dense or sparse matrix is // upper triangular: \code blaze::DynamicMatrix<float> A; // ... Resizing and initialization if( isUpper( A ) ) { ... } \endcode // Note that non-square matrices are never considered to be upper triangular! // // // \n \subsection matrix_operations_isuniupper isUniUpper() // // Via the \c isUniUpper() function it is possible to check whether a dense or sparse matrix is // upper unitriangular: \code blaze::DynamicMatrix<float> A; // ... Resizing and initialization if( isUniUpper( A ) ) { ... } \endcode // Note that non-square matrices are never considered to be upper unitriangular! // // // \n \subsection matrix_operations_isstrictlyupper isStrictlyUpper() // // Via the \c isStrictlyUpper() function it is possible to check whether a dense or sparse matrix // is strictly upper triangular: \code blaze::DynamicMatrix<float> A; // ... Resizing and initialization if( isStrictlyUpper( A ) ) { ...
} \endcode // Note that non-square matrices are never considered to be strictly upper triangular! // // // \n \subsection matrix_operations_isdiagonal isDiagonal() // // The \c isDiagonal() function checks if the given dense or sparse matrix is a diagonal matrix, // i.e. if all non-diagonal elements are default elements: \code blaze::CompressedMatrix<float> A; // ... Resizing and initialization if( isDiagonal( A ) ) { ... } \endcode // Note that non-square matrices are never considered to be diagonal! // // // \n \subsection matrix_operations_isidentity isIdentity() // // The \c isIdentity() function checks if the given dense or sparse matrix is an identity matrix, // i.e. if all diagonal elements are 1 and all non-diagonal elements are 0: \code blaze::CompressedMatrix<float> A; // ... Resizing and initialization if( isIdentity( A ) ) { ... } \endcode // Note that non-square matrices are never considered to be identity matrices! // // // \n \subsection matrix_operations_min_max min() / max() // // The \c min() and the \c max() functions return the smallest and largest element of the given // dense or sparse matrix, respectively: \code blaze::StaticMatrix<int,2UL,3UL,rowMajor> A{ { -5, 2, 7 }, { 4, 0, 1 } }; blaze::StaticMatrix<int,2UL,3UL,rowMajor> B{ { -5, 2, -7 }, { -4, 0, -1 } }; min( A ); // Returns -5 min( B ); // Returns -7 max( A ); // Returns 7 max( B ); // Returns 2 \endcode // In case the matrix currently has 0 rows or 0 columns, both functions return 0. Additionally, in // case a given sparse matrix is not completely filled, the zero elements are taken into account. // For example: the following compressed matrix has only 2 non-zero elements. However, the minimum // of this matrix is 0: \code blaze::CompressedMatrix<int> C( 2UL, 3UL ); C(0,0) = 1; C(0,2) = 3; min( C ); // Returns 0 \endcode // Also note that the \c min() and \c max() functions can be used to compute the smallest and // largest element of a matrix expression: \code min( A + B + C ); // Returns -9, i.e. the smallest value of the resulting matrix max( A - B - C ); // Returns 11, i.e. the largest value of the resulting matrix \endcode // \n \subsection matrix_operators_abs abs() // // The \c abs() function can be used to compute the absolute values of each element of a matrix.
// For instance, the following computation \code blaze::StaticMatrix<int,2UL,3UL,rowMajor> A{ { -1, 2, -3 }, { 4, -5, 6 } }; blaze::StaticMatrix<int,2UL,3UL,rowMajor> B( abs( A ) ); \endcode // results in the matrix \f$ B = \left(\begin{array}{*{3}{c}} 1 & 2 & 3 \\ 4 & 5 & 6 \\ \end{array}\right)\f$ // \n \subsection matrix_operators_floor_ceil floor() / ceil() // // The \c floor() and \c ceil() functions can be used to round down/up each element of a matrix, // respectively: \code blaze::StaticMatrix<double,3UL,3UL> A, B; B = floor( A ); // Rounding down each element of the matrix B = ceil( A ); // Rounding up each element of the matrix \endcode // \n \subsection matrix_operators_conj conj() // // The \c conj() function can be applied on a dense or sparse matrix to compute the complex // conjugate of each element of the matrix: \code using blaze::StaticMatrix; typedef std::complex<double> cplx; // Creating the matrix // ( (1,0) (-2,-1) ) // ( (1,1) ( 0, 1) ) StaticMatrix<cplx,2UL,2UL> A{ { cplx( 1.0, 0.0 ), cplx( -2.0, -1.0 ) }, { cplx( 1.0, 1.0 ), cplx( 0.0, 1.0 ) } }; // Computing the matrix of conjugate values // ( (1, 0) (-2, 1) ) // ( (1,-1) ( 0,-1) ) StaticMatrix<cplx,2UL,2UL> B; B = conj( A ); \endcode // Additionally, matrices can be conjugated in-place via the \c conjugate() function: \code blaze::DynamicMatrix<cplx> C( 5UL, 2UL ); conjugate( C ); // In-place conjugate operation. C = conj( C ); // Same as above \endcode // \n \subsection matrix_operators_real real() // // The \c real() function can be used on a dense or sparse matrix to extract the real part of // each element of the matrix: \code using blaze::StaticMatrix; typedef std::complex<double> cplx; // Creating the matrix // ( (1,0) (-2,-1) ) // ( (1,1) ( 0, 1) ) StaticMatrix<cplx,2UL,2UL> A{ { cplx( 1.0, 0.0 ), cplx( -2.0, -1.0 ) }, { cplx( 1.0, 1.0 ), cplx( 0.0, 1.0 ) } }; // Extracting the real part of each matrix element // ( 1 -2 ) // ( 1 0 ) StaticMatrix<double,2UL,2UL> B; B = real( A ); \endcode // \n \subsection matrix_operators_imag imag() // // The \c imag() function can be used on a dense or sparse matrix to extract the imaginary part // of each element of the matrix: \code using blaze::StaticMatrix; typedef std::complex<double> cplx; // Creating the matrix // ( (1,0) (-2,-1) ) // ( (1,1) ( 0, 1) ) StaticMatrix<cplx,2UL,2UL> A{ { cplx( 1.0, 0.0 ), cplx( -2.0, -1.0 ) }, { cplx( 1.0, 1.0 ), cplx( 0.0, 1.0 ) } }; // Extracting the imaginary part of each matrix element // ( 0 -1 ) // ( 1 1 ) StaticMatrix<double,2UL,2UL> B; B = imag( A ); \endcode // \n \subsection matrix_operators_sqrt sqrt() / invsqrt() // // Via the \c sqrt() and \c invsqrt() functions the (inverse) square root of each element of a // matrix can be computed: \code blaze::StaticMatrix<double,3UL,3UL> A, B, C; B = sqrt( A ); // Computes the square root of each element C = invsqrt( A ); // Computes the inverse square root of each element \endcode // Note that in case of sparse matrices only the non-zero elements are taken into account! // // // \n \subsection matrix_operators_cbrt cbrt() / invcbrt() // // The \c cbrt() and \c invcbrt() functions can be used to compute the (inverse) cubic root // of each element of a matrix: \code blaze::DynamicMatrix<double> A, B, C; B = cbrt( A ); // Computes the cubic root of each element C = invcbrt( A ); // Computes the inverse cubic root of each element \endcode // Note that in case of sparse matrices only the non-zero elements are taken into account!
// // // \n \subsection matrix_operators_clip clip() // // The \c clip() function can be used to restrict all elements of a matrix to a specific range: \code blaze::DynamicMatrix<double> A, B; B = clip( A, -1.0, 1.0 ); // Restrict all elements to the range [-1..1] \endcode // Note that in case of sparse matrices only the non-zero elements are taken into account! // // // \n \subsection matrix_operators_pow pow() // // The \c pow() function can be used to compute the exponential value of each element of a matrix: \code blaze::StaticMatrix<double,3UL,3UL> A, B; B = pow( A, 1.2 ); // Computes the exponential value of each element \endcode // \n \subsection matrix_operators_exp exp() // // \c exp() computes the base e exponential of each element of a matrix: \code blaze::HybridMatrix<double,3UL,3UL> A, B; B = exp( A ); // Computes the base e exponential of each element \endcode // Note that in case of sparse matrices only the non-zero elements are taken into account! // // // \n \subsection matrix_operators_log log() / log10() // // The \c log() and \c log10() functions can be used to compute the natural and common logarithm // of each element of a matrix: \code blaze::StaticMatrix<double,3UL,3UL> A, B; B = log( A ); // Computes the natural logarithm of each element B = log10( A ); // Computes the common logarithm of each element \endcode // \n \subsection matrix_operators_trigonometric_functions sin() / cos() / tan() / asin() / acos() / atan() // // The following trigonometric functions are available for both dense and sparse matrices: \code blaze::DynamicMatrix<double> A, B; B = sin( A ); // Computes the sine of each element of the matrix B = cos( A ); // Computes the cosine of each element of the matrix B = tan( A ); // Computes the tangent of each element of the matrix B = asin( A ); // Computes the inverse sine of each element of the matrix B = acos( A ); // Computes the inverse cosine of each element of the matrix B = atan( A ); // Computes the inverse tangent of each element of the matrix \endcode // Note that in case of sparse matrices only the non-zero elements are taken into account! // // // \n \subsection matrix_operators_hyperbolic_functions sinh() / cosh() / tanh() / asinh() / acosh() / atanh() // // The following hyperbolic functions are available for both dense and sparse matrices: \code blaze::DynamicMatrix<double> A, B; B = sinh( A ); // Computes the hyperbolic sine of each element of the matrix B = cosh( A ); // Computes the hyperbolic cosine of each element of the matrix B = tanh( A ); // Computes the hyperbolic tangent of each element of the matrix B = asinh( A ); // Computes the inverse hyperbolic sine of each element of the matrix B = acosh( A ); // Computes the inverse hyperbolic cosine of each element of the matrix B = atanh( A ); // Computes the inverse hyperbolic tangent of each element of the matrix \endcode // \n \subsection matrix_operators_erf erf() / erfc() // // The \c erf() and \c erfc() functions compute the (complementary) error function of each // element of a matrix: \code blaze::StaticMatrix<double,3UL,3UL> A, B; B = erf( A ); // Computes the error function of each element B = erfc( A ); // Computes the complementary error function of each element \endcode // Note that in case of sparse matrices only the non-zero elements are taken into account! // // // \n \subsection matrix_operations_foreach forEach() // // Via the \c forEach() function it is possible to execute custom operations on dense and sparse // matrices. 
For instance, the following example demonstrates a custom square root computation via // a lambda: \code blaze::DynamicMatrix<double> A, B; B = forEach( A, []( double d ) { return std::sqrt( d ); } ); \endcode // Although the computation can be parallelized, it is not vectorized and thus cannot perform at // peak performance. However, it is also possible to create vectorized custom operations. See // \ref custom_operations for a detailed overview of the possibilities of custom operations. // // // \n \subsection matrix_operations_matrix_transpose trans() // // Matrices can be transposed via the \c trans() function. Row-major matrices are transposed into // a column-major matrix and vice versa: \code blaze::DynamicMatrix<int,rowMajor> M1( 5UL, 2UL ); blaze::CompressedMatrix<int,columnMajor> M2( 3UL, 7UL ); M1 = M2; // Assigning a column-major matrix to a row-major matrix M1 = trans( M2 ); // Assigning the transpose of M2 (i.e. a row-major matrix) to M1 M1 += trans( M2 ); // Addition assignment of two row-major matrices \endcode // Additionally, matrices can be transposed in-place via the \c transpose() function: \code blaze::DynamicMatrix<int,rowMajor> M( 5UL, 2UL ); transpose( M ); // In-place transpose operation. M = trans( M ); // Same as above \endcode // Note however that the transpose operation fails if ... // // - ... the given matrix has a fixed size and is non-square; // - ... the given matrix is a triangular matrix; // - ... the given submatrix affects the restricted parts of a triangular matrix; // - ... the given submatrix would cause non-deterministic results in a symmetric/Hermitian matrix. // // // \n \subsection matrix_operations_conjugate_transpose ctrans() // // The conjugate transpose of a dense or sparse matrix (also called adjoint matrix, Hermitian // conjugate, or transjugate) can be computed via the \c ctrans() function: \code blaze::DynamicMatrix< complex<float>, rowMajor > M1( 5UL, 2UL ); blaze::CompressedMatrix< complex<float>, columnMajor > M2( 2UL, 5UL ); M1 = ctrans( M2 ); // Compute the conjugate transpose matrix \endcode // Note that the \c ctrans() function has the same effect as manually applying the \c conj() and // \c trans() functions in any order: \code M1 = trans( conj( M2 ) ); // Computing the conjugate transpose matrix M1 = conj( trans( M2 ) ); // Computing the conjugate transpose matrix \endcode // The \c ctranspose() function can be used to perform an in-place conjugate transpose operation: \code blaze::DynamicMatrix<int,rowMajor> M( 5UL, 2UL ); ctranspose( M ); // In-place conjugate transpose operation. M = ctrans( M ); // Same as above \endcode // Note however that the conjugate transpose operation fails if ... // // - ... the given matrix has a fixed size and is non-square; // - ... the given matrix is a triangular matrix; // - ... the given submatrix affects the restricted parts of a triangular matrix; // - ... the given submatrix would cause non-deterministic results in a symmetric/Hermitian matrix. // // // \n \subsection matrix_operations_matrix_determinant det() // // The determinant of a square dense matrix can be computed by means of the \c det() function: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization double d = det( A ); // Compute the determinant of A \endcode // In case the given dense matrix is not a square matrix, a \c std::invalid_argument exception is // thrown.
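// A minimal sketch of this failure case (the exception type is the one stated above; the 2x3 dimensions are chosen merely for illustration): \code blaze::DynamicMatrix<double> A( 2UL, 3UL ); // Non-square 2x3 matrix try { const double d = det( A ); // Fails: A is not a square matrix } catch( const std::invalid_argument& ex ) { // ... Error handling } \endcode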
// // \note The \c det() function can only be used for dense matrices with \c float, \c double, // \c complex<float> or \c complex<double> element type. The attempt to call the function with // matrices of any other element type or with a sparse matrix results in a compile time error! // // \note The function depends on LAPACK kernels. Thus the function can only be used if the // fitting LAPACK library is available and linked to the executable. Otherwise a linker error // will be created. // // // \n \subsection matrix_operations_swap swap() // // Via the \c swap() function it is possible to completely swap the contents of two matrices // of the same type: \code blaze::DynamicMatrix<int,blaze::rowMajor> M1( 10UL, 15UL ); blaze::DynamicMatrix<int,blaze::rowMajor> M2( 20UL, 10UL ); swap( M1, M2 ); // Swapping the contents of M1 and M2 \endcode // \n \section matrix_operations_matrix_inversion Matrix Inversion // <hr> // // The inverse of a square dense matrix can be computed via the \c inv() function: \code blaze::DynamicMatrix<float,blaze::rowMajor> A, B; // ... Resizing and initialization B = inv( A ); // Compute the inverse of A \endcode // Alternatively, an in-place inversion of a dense matrix can be performed via the \c invert() // function: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization invert( A ); // In-place matrix inversion \endcode // Both the \c inv() and the \c invert() functions will automatically select the most suited matrix // inversion algorithm depending on the size and type of the given matrix. For small matrices of // up to 6x6, both functions use manually optimized kernels for maximum performance. For matrices // larger than 6x6 the inversion is performed by means of the most suited matrix decomposition // method: In case of a general matrix the LU decomposition is used, for symmetric matrices the // LDLT decomposition is applied, for Hermitian matrices the LDLH decomposition is performed, and // for triangular matrices the inverse is computed via a forward or back substitution.
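// For instance, when the structure is already encoded in the matrix type via an adaptor (see \ref adaptors), the suitable algorithm is picked up automatically. A minimal sketch, assuming the in-place inversion of an adapted \c LowerMatrix: \code blaze::LowerMatrix< blaze::DynamicMatrix<double,blaze::rowMajor> > L; // ... Resizing and initialization invert( L ); // In-place inversion via forward substitution due to the lower triangular structure \endcode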
// // In case the type of the matrix does not provide additional compile time information about its // structure (symmetric, lower, upper, diagonal, ...), the information can be provided manually // when calling the \c invert() function: \code using blaze::asGeneral; using blaze::asSymmetric; using blaze::asHermitian; using blaze::asLower; using blaze::asUniLower; using blaze::asUpper; using blaze::asUniUpper; using blaze::asDiagonal; invert<asGeneral> ( A ); // In-place inversion of a general matrix invert<asSymmetric>( A ); // In-place inversion of a symmetric matrix invert<asHermitian>( A ); // In-place inversion of a Hermitian matrix invert<asLower> ( A ); // In-place inversion of a lower triangular matrix invert<asUniLower> ( A ); // In-place inversion of a lower unitriangular matrix invert<asUpper> ( A ); // In-place inversion of an upper triangular matrix invert<asUniUpper> ( A ); // In-place inversion of an upper unitriangular matrix invert<asDiagonal> ( A ); // In-place inversion of a diagonal matrix \endcode // Alternatively, via the \c invert() function it is possible to explicitly specify the inversion // algorithm: \code using blaze::byLU; using blaze::byLDLT; using blaze::byLDLH; using blaze::byLLH; // In-place inversion of a general matrix by means of an LU decomposition invert<byLU>( A ); // In-place inversion of a symmetric indefinite matrix by means of a Bunch-Kaufman decomposition invert<byLDLT>( A ); // In-place inversion of a Hermitian indefinite matrix by means of a Bunch-Kaufman decomposition invert<byLDLH>( A ); // In-place inversion of a positive definite matrix by means of a Cholesky decomposition invert<byLLH>( A ); \endcode // Whereas the inversion by means of an LU decomposition works for every general square matrix, // the inversion by LDLT only works for symmetric indefinite matrices, the inversion by LDLH is // restricted to Hermitian indefinite matrices and the Cholesky decomposition (LLH) only works // for Hermitian positive definite matrices. Please note that it is the responsibility of the // function caller to guarantee that the selected algorithm is suited for the given matrix. In // case this precondition is violated the result can be wrong and might not represent the inverse // of the given matrix! // // For both the \c inv() and \c invert() function the matrix inversion fails if ... // // - ... the given matrix is not a square matrix; // - ... the given matrix is singular and not invertible. // // In all failure cases either a compilation error is created if the failure can be predicted at // compile time or a \c std::invalid_argument exception is thrown. // // \note The matrix inversion can only be used for dense matrices with \c float, \c double, // \c complex<float> or \c complex<double> element type. The attempt to call the function with // matrices of any other element type or with a sparse matrix results in a compile time error! // // \note The functions invert the dense matrix by means of LAPACK kernels. Thus the functions can // only be used if the fitting LAPACK library is available and linked to the executable. Otherwise // a linker error will be created. // // \note It is not possible to use any kind of view on the expression object returned by the // \c inv() function. Also, it is not possible to access individual elements via the function call // operator on the expression object: \code row( inv( A ), 2UL ); // Compilation error: Views cannot be used on an inv() expression!
inv( A )(1,2); // Compilation error: It is not possible to access individual elements! \endcode // \note The inversion functions do not provide any exception safety guarantee, i.e. in case an // exception is thrown the matrix may already have been modified. // // // \n \section matrix_operations_decomposition Matrix Decomposition // <hr> // // \note All decomposition functions can only be used for dense matrices with \c float, \c double, // \c complex<float> or \c complex<double> element type. The attempt to call the function with // matrices of any other element type or with a sparse matrix results in a compile time error! // // \note The functions decompose a dense matrix by means of LAPACK kernels. Thus the functions can // only be used if the fitting LAPACK library is available and linked to the executable. Otherwise // a linker error will be created. // // \subsection matrix_operations_decomposition_lu LU Decomposition // // The LU decomposition of a dense matrix can be computed via the \c lu() function: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization blaze::DynamicMatrix<double,blaze::rowMajor> L, U, P; lu( A, L, U, P ); // LU decomposition of a row-major matrix assert( A == L * U * P ); \endcode \code blaze::DynamicMatrix<double,blaze::columnMajor> A; // ... Resizing and initialization blaze::DynamicMatrix<double,blaze::columnMajor> L, U, P; lu( A, L, U, P ); // LU decomposition of a column-major matrix assert( A == P * L * U ); \endcode // The function works for both \c rowMajor and \c columnMajor matrices. Note, however, that the // three matrices \c A, \c L and \c U are required to have the same storage order. Also, please // note that the way the permutation matrix \c P needs to be applied differs between row-major and // column-major matrices, since the algorithm uses column interchanges for row-major matrices and // row interchanges for column-major matrices. // // Furthermore, \c lu() can be used with adaptors. For instance, the following example demonstrates // the LU decomposition of a symmetric matrix into a lower and upper triangular matrix: \code blaze::SymmetricMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > A; // ... Resizing and initialization blaze::LowerMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > L; blaze::UpperMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > U; blaze::DynamicMatrix<double,blaze::columnMajor> P; lu( A, L, U, P ); // LU decomposition of A \endcode // \n \subsection matrix_operations_decomposition_llh Cholesky Decomposition // // The Cholesky (LLH) decomposition of a dense matrix can be computed via the \c llh() function: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization blaze::DynamicMatrix<double,blaze::rowMajor> L; llh( A, L ); // LLH decomposition of a row-major matrix assert( A == L * ctrans( L ) ); \endcode // The function works for both \c rowMajor and \c columnMajor matrices and the two matrices \c A // and \c L can have any storage order. // // Furthermore, \c llh() can be used with adaptors. For instance, the following example demonstrates // the LLH decomposition of a symmetric matrix into a lower triangular matrix: \code blaze::SymmetricMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > A; // ... 
Resizing and initialization blaze::LowerMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > L; llh( A, L ); // Cholesky decomposition of A \endcode // \n \subsection matrix_operations_decomposition_qr QR Decomposition // // The QR decomposition of a dense matrix can be computed via the \c qr() function: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization blaze::DynamicMatrix<double,blaze::columnMajor> Q; blaze::DynamicMatrix<double,blaze::rowMajor> R; qr( A, Q, R ); // QR decomposition of a row-major matrix assert( A == Q * R ); \endcode // The function works for both \c rowMajor and \c columnMajor matrices and the three matrices // \c A, \c Q and \c R can have any storage order. // // Furthermore, \c qr() can be used with adaptors. For instance, the following example demonstrates // the QR decomposition of a symmetric matrix into a general matrix and an upper triangular matrix: \code blaze::SymmetricMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > A; // ... Resizing and initialization blaze::DynamicMatrix<double,blaze::rowMajor> Q; blaze::UpperMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > R; qr( A, Q, R ); // QR decomposition of A \endcode // \n \subsection matrix_operations_decomposition_rq RQ Decomposition // // Similar to the QR decomposition, the RQ decomposition of a dense matrix can be computed via // the \c rq() function: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization blaze::DynamicMatrix<double,blaze::rowMajor> R; blaze::DynamicMatrix<double,blaze::columnMajor> Q; rq( A, R, Q ); // RQ decomposition of a row-major matrix assert( A == R * Q ); \endcode // The function works for both \c rowMajor and \c columnMajor matrices and the three matrices // \c A, \c R and \c Q can have any storage order. // // Also the \c rq() function can be used in combination with matrix adaptors. For instance, the // following example demonstrates the RQ decomposition of an Hermitian matrix into a general // matrix and an upper triangular matrix: \code blaze::HermitianMatrix< blaze::DynamicMatrix<complex<double>,blaze::columnMajor> > A; // ... Resizing and initialization blaze::UpperMatrix< blaze::DynamicMatrix<complex<double>,blaze::columnMajor> > R; blaze::DynamicMatrix<complex<double>,blaze::rowMajor> Q; rq( A, R, Q ); // RQ decomposition of A \endcode // \n \subsection matrix_operations_decomposition_ql QL Decomposition // // The QL decomposition of a dense matrix can be computed via the \c ql() function: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization blaze::DynamicMatrix<double,blaze::rowMajor> Q; blaze::DynamicMatrix<double,blaze::columnMajor> L; ql( A, Q, L ); // QL decomposition of a row-major matrix assert( A == Q * L ); \endcode // The function works for both \c rowMajor and \c columnMajor matrices and the three matrices // \c A, \c Q and \c L can have any storage order. // // Also the \c ql() function can be used in combination with matrix adaptors. For instance, the // following example demonstrates the QL decomposition of a symmetric matrix into a general // matrix and a lower triangular matrix: \code blaze::SymmetricMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > A; // ... 
Resizing and initialization blaze::DynamicMatrix<double,blaze::rowMajor> Q; blaze::LowerMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > L; ql( A, Q, L ); // QL decomposition of A \endcode // \n \subsection matrix_operations_decomposition_lq LQ Decomposition // // The LQ decomposition of a dense matrix can be computed via the \c lq() function: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization blaze::DynamicMatrix<double,blaze::rowMajor> L; blaze::DynamicMatrix<double,blaze::columnMajor> Q; lq( A, L, Q ); // LQ decomposition of a row-major matrix assert( A == L * Q ); \endcode // The function works for both \c rowMajor and \c columnMajor matrices and the three matrices // \c A, \c L and \c Q can have any storage order. // // Furthermore, \c lq() can be used with adaptors. For instance, the following example demonstrates // the LQ decomposition of an Hermitian matrix into a lower triangular matrix and a general matrix: \code blaze::HermitianMatrix< blaze::DynamicMatrix<complex<double>,blaze::columnMajor> > A; // ... Resizing and initialization blaze::LowerMatrix< blaze::DynamicMatrix<complex<double>,blaze::columnMajor> > L; blaze::DynamicMatrix<complex<double>,blaze::rowMajor> Q; lq( A, L, Q ); // LQ decomposition of A \endcode // \n Previous: \ref matrix_types &nbsp; &nbsp; Next: \ref adaptors */ //************************************************************************************************* //**Adaptors*************************************************************************************** /*!\page adaptors Adaptors // // \tableofcontents // // // \section adaptors_general General Concepts // <hr> // // Adaptors act as wrappers around the general \ref matrix_types. They adapt the interface of the // matrices such that certain invariants are preserved. Due to this, adaptors can provide a compile // time guarantee of certain properties, which can be exploited for optimized performance. // // The \b Blaze library provides a total of 9 different adaptors: // // <ul> // <li> \ref adaptors_symmetric_matrices </li> // <li> \ref adaptors_hermitian_matrices </li> // <li> \ref adaptors_triangular_matrices // <ul> // <li> \ref adaptors_triangular_matrices "Lower Triangular Matrices" // <ul> // <li> \ref adaptors_triangular_matrices_lowermatrix </li> // <li> \ref adaptors_triangular_matrices_unilowermatrix </li> // <li> \ref adaptors_triangular_matrices_strictlylowermatrix </li> // </ul> // </li> // <li> \ref adaptors_triangular_matrices "Upper Triangular Matrices" // <ul> // <li> \ref adaptors_triangular_matrices_uppermatrix </li> // <li> \ref adaptors_triangular_matrices_uniuppermatrix </li> // <li> \ref adaptors_triangular_matrices_strictlyuppermatrix </li> // </ul> // </li> // <li> \ref adaptors_triangular_matrices "Diagonal Matrices" // <ul> // <li> \ref adaptors_triangular_matrices_diagonalmatrix </li> // </ul> // </li> // </ul> // </li> // </ul> // // In combination with the general matrix types, \b Blaze provides a total of 40 different matrix // types that make it possible to exactly adapt the type of matrix to every specific problem. // // // \n \section adaptors_examples Examples // <hr> // // The following code examples give an impression of the use of adaptors.
The first example shows // the multiplication between two lower matrices: \code using blaze::DynamicMatrix; using blaze::LowerMatrix; using blaze::rowMajor; using blaze::columnMajor; LowerMatrix< DynamicMatrix<double,rowMajor> > A; LowerMatrix< DynamicMatrix<double,columnMajor> > B; DynamicMatrix<double,columnMajor> C; // ... Resizing and initialization C = A * B; \endcode // When multiplying two matrices, at least one of which is triangular, \b Blaze can exploit the // fact that either the lower or upper part of the matrix contains only default elements and // restrict the algorithm to the non-zero elements. Thus the adaptor provides a significant // performance advantage in comparison to a general matrix multiplication, especially for large // matrices. // // The second example shows the \c SymmetricMatrix adaptor in a row-major dense matrix/sparse // vector multiplication: \code using blaze::DynamicMatrix; using blaze::DynamicVector; using blaze::CompressedVector; using blaze::SymmetricMatrix; using blaze::rowMajor; using blaze::columnVector; SymmetricMatrix< DynamicMatrix<double,rowMajor> > A; CompressedVector<double,columnVector> x; DynamicVector<double,columnVector> y; // ... Resizing and initialization y = A * x; \endcode // In this example it is not intuitively apparent that using a row-major matrix is not the best // possible choice in terms of performance since the computation cannot be vectorized. Choosing // a column-major matrix instead, however, would enable a vectorized computation. Therefore // \b Blaze exploits the fact that \c A is symmetric, selects the best suited storage order and // evaluates the multiplication as \code y = trans( A ) * x; \endcode // which significantly increases the performance. // // \n Previous: \ref matrix_operations &nbsp; &nbsp; Next: \ref adaptors_symmetric_matrices */ //************************************************************************************************* //**Symmetric Matrices***************************************************************************** /*!\page adaptors_symmetric_matrices Symmetric Matrices // // \tableofcontents // // // \n \section adaptors_symmetric_matrices_general Symmetric Matrices // <hr> // // In contrast to general matrices, which have no restriction in their number of rows and columns // and whose elements can have any value, symmetric matrices provide the compile time guarantee // to be square matrices with pair-wise identical values. Mathematically, this means that a // symmetric matrix is always equal to its transpose (\f$ A = A^T \f$) and that all non-diagonal // values have an identical counterpart (\f$ a_{ij} == a_{ji} \f$). This symmetry property can // be exploited to provide higher efficiency and/or lower memory consumption. Within the \b Blaze // library, symmetric matrices are realized by the \ref adaptors_symmetric_matrices_symmetricmatrix // class template. // // // \n \section adaptors_symmetric_matrices_symmetricmatrix SymmetricMatrix // <hr> // // The SymmetricMatrix class template is an adapter for existing dense and sparse matrix types. // It inherits the properties and the interface of the given matrix type \c MT and extends it // by enforcing the additional invariant of symmetry (i.e. the matrix is always equal to its // transpose \f$ A = A^T \f$).
It can be included via the header file \code #include <blaze/math/SymmetricMatrix.h> \endcode // The type of the adapted matrix can be specified via template parameter: \code template< typename MT > class SymmetricMatrix; \endcode // \c MT specifies the type of the matrix to be adapted. SymmetricMatrix can be used with any // non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. Note // that the given matrix type must be either resizable (as for instance blaze::HybridMatrix or // blaze::DynamicMatrix) or must be square at compile time (as for instance blaze::StaticMatrix). // // The following examples give an impression of several possible symmetric matrices: \code using blaze::unaligned; using blaze::unpadded; using blaze::rowMajor; using blaze::columnMajor; // Definition of a 3x3 row-major dense symmetric matrix with static memory blaze::SymmetricMatrix< blaze::StaticMatrix<int,3UL,3UL,rowMajor> > A; // Definition of a resizable column-major dense symmetric matrix based on HybridMatrix blaze::SymmetricMatrix< blaze::HybridMatrix<float,4UL,4UL,columnMajor> > B; // Definition of a resizable row-major dense symmetric matrix based on DynamicMatrix blaze::SymmetricMatrix< blaze::DynamicMatrix<double,rowMajor> > C; // Definition of a fixed size row-major dense symmetric matrix based on CustomMatrix blaze::SymmetricMatrix< blaze::CustomMatrix<double,unaligned,unpadded,rowMajor> > D; // Definition of a compressed row-major single precision symmetric matrix blaze::SymmetricMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > E; \endcode // The storage order of a symmetric matrix depends on the storage order of the adapted matrix // type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. is specified as // blaze::rowMajor), the symmetric matrix will also be a row-major matrix. Otherwise, if the // adapted matrix is column-major (i.e. is specified as blaze::columnMajor), the symmetric matrix // will also be a column-major matrix. // // // \n \section adaptors_symmetric_matrices_special_properties Special Properties of Symmetric Matrices // <hr> // // A symmetric matrix is used exactly like a matrix of the underlying, adapted matrix type \c MT. // It also provides (nearly) the same interface as the underlying matrix type. However, there are // some important exceptions resulting from the symmetry constraint: // // -# <b>\ref adaptors_symmetric_matrices_square</b> // -# <b>\ref adaptors_symmetric_matrices_symmetry</b> // -# <b>\ref adaptors_symmetric_matrices_initialization</b> // // \n \subsection adaptors_symmetric_matrices_square Symmetric Matrices Must Always be Square!
// // In case a resizable matrix is used (as for instance blaze::HybridMatrix, blaze::DynamicMatrix, // or blaze::CompressedMatrix), this means that the according constructors, the \c resize() and // the \c extend() functions only expect a single parameter, which specifies both the number of // rows and columns, instead of two (one for the number of rows and one for the number of columns): \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; using blaze::rowMajor; // Default constructed, default initialized, row-major 3x3 symmetric dynamic matrix SymmetricMatrix< DynamicMatrix<double,rowMajor> > A( 3 ); // Resizing the matrix to 5x5 A.resize( 5 ); // Extending the number of rows and columns by 2, resulting in a 7x7 matrix A.extend( 2 ); \endcode // In case a matrix with a fixed size is used (as for instance blaze::StaticMatrix), the number // of rows and number of columns must be specified equally: \code using blaze::StaticMatrix; using blaze::SymmetricMatrix; using blaze::columnMajor; // Correct setup of a fixed size column-major 3x3 symmetric static matrix SymmetricMatrix< StaticMatrix<int,3UL,3UL,columnMajor> > A; // Compilation error: the provided matrix type is not a square matrix type SymmetricMatrix< StaticMatrix<int,3UL,4UL,columnMajor> > B; \endcode // \n \subsection adaptors_symmetric_matrices_symmetry The Symmetric Property is Always Enforced! // // This means that modifying the element \f$ a_{ij} \f$ of a symmetric matrix also modifies its // counterpart element \f$ a_{ji} \f$. Also, it is only possible to assign matrices that are // symmetric themselves: \code using blaze::CompressedMatrix; using blaze::DynamicMatrix; using blaze::StaticMatrix; using blaze::SymmetricMatrix; using blaze::rowMajor; // Default constructed, row-major 3x3 symmetric compressed matrix SymmetricMatrix< CompressedMatrix<double,rowMajor> > A( 3 ); // Initializing three elements via the function call operator A(0,0) = 1.0; // Initialization of the diagonal element (0,0) A(0,2) = 2.0; // Initialization of the elements (0,2) and (2,0) // Inserting three more elements via the insert() function A.insert( 1, 1, 3.0 ); // Inserting the diagonal element (1,1) A.insert( 1, 2, 4.0 ); // Inserting the elements (1,2) and (2,1) // Access via a non-const iterator *A.begin(2UL) = 10.0; // Modifies both elements (2,0) and (0,2) // Erasing elements via the erase() function A.erase( 0, 0 ); // Erasing the diagonal element (0,0) A.erase( 0, 2 ); // Erasing the elements (0,2) and (2,0) // Construction from a symmetric dense matrix StaticMatrix<double,3UL,3UL> B{ { 3.0, 8.0, -2.0 }, { 8.0, 0.0, -1.0 }, { -2.0, -1.0, 4.0 } }; SymmetricMatrix< DynamicMatrix<double,rowMajor> > C( B ); // OK // Assignment of a non-symmetric dense matrix StaticMatrix<double,3UL,3UL> D{ { 3.0, 7.0, -2.0 }, { 8.0, 0.0, -1.0 }, { -2.0, -1.0, 4.0 } }; C = D; // Throws an exception; symmetric invariant would be violated! \endcode // The same restriction also applies to the \c append() function for sparse matrices: Appending // the element \f$ a_{ij} \f$ additionally inserts the element \f$ a_{ji} \f$ into the matrix. // Despite the additional insertion, the \c append() function still provides the most efficient // way to set up a symmetric sparse matrix.
In order to achieve the maximum efficiency, the // capacity of the individual rows/columns of the matrix should be specifically prepared with // \c reserve() calls: \code using blaze::CompressedMatrix; using blaze::SymmetricMatrix; using blaze::rowMajor; // Setup of the symmetric matrix // // ( 0 1 3 ) // A = ( 1 2 0 ) // ( 3 0 0 ) // SymmetricMatrix< CompressedMatrix<double,rowMajor> > A( 3 ); A.reserve( 5 ); // Reserving enough space for 5 non-zero elements A.reserve( 0, 2 ); // Reserving two non-zero elements in the first row A.reserve( 1, 2 ); // Reserving two non-zero elements in the second row A.reserve( 2, 1 ); // Reserving a single non-zero element in the third row A.append( 0, 1, 1.0 ); // Appending the value 1 at position (0,1) and (1,0) A.append( 1, 1, 2.0 ); // Appending the value 2 at position (1,1) A.append( 2, 0, 3.0 ); // Appending the value 3 at position (2,0) and (0,2) \endcode // The symmetry property is also enforced for symmetric custom matrices: In case the given array // of elements does not represent a symmetric matrix, a \c std::invalid_argument exception is // thrown: \code using blaze::CustomMatrix; using blaze::SymmetricMatrix; using blaze::unaligned; using blaze::unpadded; using blaze::rowMajor; typedef SymmetricMatrix< CustomMatrix<double,unaligned,unpadded,rowMajor> > CustomSymmetric; // Creating a 3x3 symmetric custom matrix from a properly initialized array double array[9] = { 1.0, 2.0, 4.0, 2.0, 3.0, 5.0, 4.0, 5.0, 6.0 }; CustomSymmetric A( array, 3UL ); // OK // Attempt to create a second 3x3 symmetric custom matrix from an uninitialized array CustomSymmetric B( new double[9UL], 3UL, blaze::ArrayDelete() ); // Throws an exception \endcode // Finally, the symmetry property is enforced for views (rows, columns, submatrices, ...) on the // symmetric matrix. The following example demonstrates that modifying the elements of an entire // row of the symmetric matrix also affects the counterpart elements in the according column of // the matrix: \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; // Setup of the symmetric matrix // // ( 0 1 0 2 ) // A = ( 1 3 4 0 ) // ( 0 4 0 5 ) // ( 2 0 5 0 ) // SymmetricMatrix< DynamicMatrix<int> > A( 4 ); A(0,1) = 1; A(0,3) = 2; A(1,1) = 3; A(1,2) = 4; A(2,3) = 5; // Setting all elements in the 1st row to 0 results in the matrix // // ( 0 0 0 2 ) // A = ( 0 0 0 0 ) // ( 0 0 0 5 ) // ( 2 0 5 0 ) // row( A, 1 ) = 0; \endcode // The next example demonstrates the (compound) assignment to submatrices of symmetric matrices. // Since the modification of element \f$ a_{ij} \f$ of a symmetric matrix also modifies the // element \f$ a_{ji} \f$, the matrix to be assigned must be structured such that the symmetry // of the symmetric matrix is preserved. Otherwise a \c std::invalid_argument exception is // thrown: \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; // Setup of two default 4x4 symmetric matrices SymmetricMatrix< DynamicMatrix<int> > A1( 4 ), A2( 4 ); // Setup of the 3x2 dynamic matrix // // ( 1 2 ) // B = ( 3 4 ) // ( 5 6 ) // DynamicMatrix<int> B{ { 1, 2 }, { 3, 4 }, { 5, 6 } }; // OK: Assigning B to a submatrix of A1 such that the symmetry can be preserved // // ( 0 0 1 2 ) // A1 = ( 0 0 3 4 ) // ( 1 3 5 6 ) // ( 2 4 6 0 ) // submatrix( A1, 0UL, 2UL, 3UL, 2UL ) = B; // OK // Error: Assigning B to a submatrix of A2 such that the symmetry cannot be preserved! // The elements marked with X cannot be assigned unambiguously!
// // ( 0 1 2 0 ) // A2 = ( 1 3 X 0 ) // ( 2 X 6 0 ) // ( 0 0 0 0 ) // submatrix( A2, 0UL, 1UL, 3UL, 2UL ) = B; // Assignment throws an exception! \endcode // \n \subsection adaptors_symmetric_matrices_initialization The Elements of a Dense Symmetric Matrix are Always Default Initialized! // // Although this results in a small loss of efficiency (especially in case all default values are // overridden afterwards), this property is important since otherwise the symmetric property of // dense symmetric matrices could not be guaranteed: \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; using blaze::rowMajor; // Uninitialized, 5x5 row-major dynamic matrix DynamicMatrix<int,rowMajor> A( 5, 5 ); // Default initialized, 5x5 row-major symmetric dynamic matrix SymmetricMatrix< DynamicMatrix<int,rowMajor> > B( 5 ); \endcode // \n \section adaptors_symmetric_matrices_arithmetic_operations Arithmetic Operations // <hr> // // A SymmetricMatrix can participate in numerical operations in any way any other dense // or sparse matrix can. It can also be combined with any other dense or sparse vector // or matrix. The following code example gives an impression of the use of SymmetricMatrix within // arithmetic operations: \code using blaze::SymmetricMatrix; using blaze::DynamicMatrix; using blaze::HybridMatrix; using blaze::StaticMatrix; using blaze::CompressedMatrix; using blaze::rowMajor; using blaze::columnMajor; SymmetricMatrix< DynamicMatrix<float,rowMajor> > A( 3 ); // Row-major dense symmetric 3x3 matrix SymmetricMatrix< DynamicMatrix<float,columnMajor> > B( 3 ); // Column-major dense symmetric 3x3 matrix SymmetricMatrix< CompressedMatrix<float,rowMajor> > C( 3 ); // Row-major sparse symmetric 3x3 matrix CompressedMatrix<float> E( 3, 3 ); // Empty row-major sparse single precision 3x3 matrix SymmetricMatrix< HybridMatrix<float,3UL,3UL,rowMajor> > F; SymmetricMatrix< StaticMatrix<float,3UL,3UL,columnMajor> > G; F = A + B; // Matrix addition and assignment to a row-major symmetric matrix G = A - C; // Matrix subtraction and assignment to a column-major symmetric matrix G = A * E; // Matrix multiplication between a dense and a sparse matrix A *= 2.0; // In-place scaling of matrix A F = 2.0 * B; // Scaling of matrix B G = E * 2.0; // Scaling of matrix E F += A - B; // Addition assignment G -= A + C; // Subtraction assignment G *= A * E; // Multiplication assignment \endcode // \n \section adaptors_symmetric_matrices_block_matrices Symmetric Block Matrices // <hr> // // It is also possible to use symmetric block matrices: \code using blaze::CompressedMatrix; using blaze::StaticMatrix; using blaze::SymmetricMatrix; // Definition of a 5x5 symmetric block matrix based on CompressedMatrix SymmetricMatrix< CompressedMatrix< StaticMatrix<int,3UL,3UL> > > A( 5 ); \endcode // Also in this case, the SymmetricMatrix class template enforces the invariant of symmetry and // guarantees that a modification of element \f$ a_{ij} \f$ of the adapted matrix is also // applied to element \f$ a_{ji} \f$: \code // Inserting the elements (2,4) and (4,2) A.insert( 2, 4, StaticMatrix<int,3UL,3UL>{ { 1, -4, 5 }, { 6, 8, -3 }, { 2, -1, 2 } } ); // Manipulating the elements (2,4) and (4,2) A(2,4)(1,1) = -5; \endcode // For more information on block matrices, see the tutorial on \ref block_vectors_and_matrices. // // // \n \section adaptors_symmetric_matrices_performance Performance Considerations // <hr> // // When the symmetric property of a matrix is known beforehand, using the SymmetricMatrix adaptor // instead of a general matrix can be a considerable performance advantage. The \b Blaze library // tries to exploit the properties of symmetric matrices whenever possible. However, there are // also situations when using a symmetric matrix introduces some overhead.
The following examples // demonstrate several situations where symmetric matrices can positively or negatively impact // performance. // // \n \subsection adaptors_symmetric_matrices_matrix_matrix_multiplication Positive Impact: Matrix/Matrix Multiplication // // When multiplying two matrices, at least one of which is symmetric, \b Blaze can exploit the fact // that \f$ A = A^T \f$ and choose the fastest and most suited combination of storage orders for the // multiplication. The following example demonstrates this by means of a dense matrix/sparse matrix // multiplication: \code using blaze::DynamicMatrix; using blaze::CompressedMatrix; using blaze::SymmetricMatrix; using blaze::rowMajor; using blaze::columnMajor; SymmetricMatrix< DynamicMatrix<double,rowMajor> > A; SymmetricMatrix< CompressedMatrix<double,columnMajor> > B; DynamicMatrix<double,columnMajor> C; // ... Resizing and initialization C = A * B; \endcode // Intuitively, the chosen combination of a row-major and a column-major matrix is the most suited // for maximum performance. However, \b Blaze evaluates the multiplication as \code C = A * trans( B ); \endcode // which significantly increases the performance since in contrast to the original formulation the // optimized form can be vectorized. Therefore, in the context of matrix multiplications, using the // SymmetricMatrix adaptor is obviously an advantage. // // \n \subsection adaptors_symmetric_matrices_matrix_vector_multiplication Positive Impact: Matrix/Vector Multiplication // // A similar optimization is possible in case of matrix/vector multiplications: \code using blaze::DynamicMatrix; using blaze::DynamicVector; using blaze::CompressedVector; using blaze::SymmetricMatrix; using blaze::rowMajor; using blaze::columnVector; SymmetricMatrix< DynamicMatrix<double,rowMajor> > A; CompressedVector<double,columnVector> x; DynamicVector<double,columnVector> y; // ... Resizing and initialization y = A * x; \endcode // In this example it is not intuitively apparent that using a row-major matrix is not the best // possible choice in terms of performance since the computation cannot be vectorized. Choosing // a column-major matrix instead, however, would enable a vectorized computation. Therefore // \b Blaze exploits the fact that \c A is symmetric, selects the best suited storage order and // evaluates the multiplication as \code y = trans( A ) * x; \endcode // which also significantly increases the performance. // // \n \subsection adaptors_symmetric_matrices_views Positive Impact: Row/Column Views on Column/Row-Major Matrices // // Another example is the optimization of a row view on a column-major symmetric matrix: \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; using blaze::Row; using blaze::rowMajor; using blaze::columnMajor; typedef SymmetricMatrix< DynamicMatrix<double,columnMajor> > DynamicSymmetric; DynamicSymmetric A( 10UL ); Row<DynamicSymmetric> row5 = row( A, 5UL ); \endcode // Usually, a row view on a column-major matrix results in a considerable performance decrease in // comparison to a row view on a row-major matrix due to the non-contiguous storage of the matrix // elements. However, in case of symmetric matrices, \b Blaze instead uses the according column of // the matrix, which provides the same performance as if the matrix were row-major. Note that // this also works for column views on row-major matrices, where \b Blaze can use the according // row instead of a column in order to provide maximum performance.
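// As a sketch of this opposite direction, assuming the analogous \c Column view and \c column() function: \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; using blaze::Column; using blaze::rowMajor; typedef SymmetricMatrix< DynamicMatrix<double,rowMajor> > RowMajorSymmetric; RowMajorSymmetric B( 10UL ); Column<RowMajorSymmetric> col5 = column( B, 5UL ); // Internally handled like a view on row 5 of B \endcode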
// // \n \subsection adaptors_symmetric_matrices_assignment Negative Impact: Assignment of a General Matrix // // In contrast to using a symmetric matrix on the right-hand side of an assignment (i.e. for read // access), which introduces absolutely no performance penalty, using a symmetric matrix on the // left-hand side of an assignment (i.e. for write access) may introduce additional overhead when // it is assigned a general matrix, which is not symmetric at compile time: \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; SymmetricMatrix< DynamicMatrix<double> > A, C; DynamicMatrix<double> B; B = A; // Only read-access to the symmetric matrix; no performance penalty C = A; // Assignment of a symmetric matrix to another symmetric matrix; no runtime overhead C = B; // Assignment of a general matrix to a symmetric matrix; some runtime overhead \endcode // When assigning a general, potentially not symmetric matrix to a symmetric matrix it is necessary // to check whether the matrix is symmetric at runtime in order to guarantee the symmetry property // of the symmetric matrix. In case it turns out to be symmetric, it is assigned as efficiently as // possible; if it is not, an exception is thrown. In order to prevent this runtime overhead it is // therefore generally advisable to assign symmetric matrices to other symmetric matrices.\n // In this context it is especially noteworthy that in contrast to additions and subtractions the // multiplication of two symmetric matrices does not necessarily result in another symmetric matrix: \code SymmetricMatrix< DynamicMatrix<double> > A, B, C; C = A + B; // Results in a symmetric matrix; no runtime overhead C = A - B; // Results in a symmetric matrix; no runtime overhead C = A * B; // Is not guaranteed to result in a symmetric matrix; some runtime overhead \endcode // \n Previous: \ref adaptors &nbsp; &nbsp; Next: \ref adaptors_hermitian_matrices */ //************************************************************************************************* //**Hermitian Matrices***************************************************************************** /*!\page adaptors_hermitian_matrices Hermitian Matrices // // \tableofcontents // // // \n \section adaptors_hermitian_matrices_general Hermitian Matrices // <hr> // // In addition to symmetric matrices, \b Blaze also provides an adaptor for Hermitian matrices. // Hermitian matrices provide the compile time guarantee to be square matrices with pair-wise // conjugate complex values. Mathematically, this means that an Hermitian matrix is always equal // to its conjugate transpose (\f$ A = \overline{A^T} \f$) and that all non-diagonal values have // a complex conjugate counterpart (\f$ a_{ij} == \overline{a_{ji}} \f$). Within the \b Blaze // library, Hermitian matrices are realized by the \ref adaptors_hermitian_matrices_hermitianmatrix // class template. // // // \n \section adaptors_hermitian_matrices_hermitianmatrix HermitianMatrix // <hr> // // The HermitianMatrix class template is an adapter for existing dense and sparse matrix types. // It inherits the properties and the interface of the given matrix type \c MT and extends it by // enforcing the additional invariant of Hermitian symmetry (i.e. the matrix is always equal to // its conjugate transpose \f$ A = \overline{A^T} \f$).
It can be included via the header file \code #include <blaze/math/HermitianMatrix.h> \endcode // The type of the adapted matrix can be specified via template parameter: \code template< typename MT > class HermitianMatrix; \endcode // \c MT specifies the type of the matrix to be adapted. HermitianMatrix can be used with any // non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. Also, // the given matrix type must have a numeric element type (i.e. all integral types except \c bool, // floating point and complex types). Note that the given matrix type must be either resizable (as // for instance blaze::HybridMatrix or blaze::DynamicMatrix) or must be square at compile time (as // for instance blaze::StaticMatrix). // // The following examples give an impression of several possible Hermitian matrices: \code using blaze::unaligned; using blaze::unpadded; using blaze::rowMajor; using blaze::columnMajor; // Definition of a 3x3 row-major dense Hermitian matrix with static memory blaze::HermitianMatrix< blaze::StaticMatrix<int,3UL,3UL,rowMajor> > A; // Definition of a resizable column-major dense Hermitian matrix based on HybridMatrix blaze::HermitianMatrix< blaze::HybridMatrix<float,4UL,4UL,columnMajor> > B; // Definition of a resizable row-major dense Hermitian matrix based on DynamicMatrix blaze::HermitianMatrix< blaze::DynamicMatrix<std::complex<double>,rowMajor> > C; // Definition of a fixed size row-major dense Hermitian matrix based on CustomMatrix blaze::HermitianMatrix< blaze::CustomMatrix<double,unaligned,unpadded,rowMajor> > D; // Definition of a compressed row-major single precision complex Hermitian matrix blaze::HermitianMatrix< blaze::CompressedMatrix<std::complex<float>,rowMajor> > E; \endcode // The storage order of a Hermitian matrix depends on the storage order of the adapted matrix // type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. is specified as // blaze::rowMajor), the Hermitian matrix will also be a row-major matrix. Otherwise, if the // adapted matrix is column-major (i.e. is specified as blaze::columnMajor), the Hermitian matrix // will also be a column-major matrix. // // // \n \section adaptors_hermitian_matrices_vs_symmetric_matrices Hermitian Matrices vs. Symmetric Matrices // // The blaze::HermitianMatrix adaptor and the blaze::SymmetricMatrix adaptor share several traits. // However, there are a couple of differences, both from a mathematical point of view as well as // from an implementation point of view. // // From a mathematical point of view, a matrix is called symmetric when it is equal to its // transpose (\f$ A = A^T \f$) and it is called Hermitian when it is equal to its conjugate // transpose (\f$ A = \overline{A^T} \f$). For matrices of real values, however, these two // conditions coincide, which means that symmetric matrices of real values are also Hermitian // and Hermitian matrices of real values are also symmetric. // // From an implementation point of view, \b Blaze restricts Hermitian matrices to numeric data // types (i.e. all integral types except \c bool, floating point and complex types), whereas // symmetric matrices can also be block matrices (i.e. can have vector or matrix elements). // For built-in element types, the HermitianMatrix adaptor behaves exactly like the according // SymmetricMatrix implementation. For complex element types, however, the Hermitian property // is enforced (see also \ref adaptors_hermitian_matrices_hermitian).
\code using blaze::DynamicMatrix; using blaze::DynamicVector; using blaze::HermitianMatrix; using blaze::SymmetricMatrix; // The following two matrices provide an identical experience (including performance) HermitianMatrix< DynamicMatrix<double> > A; // Both Hermitian and symmetric SymmetricMatrix< DynamicMatrix<double> > B; // Both Hermitian and symmetric // The following two matrices will behave differently HermitianMatrix< DynamicMatrix< complex<double> > > C; // Only Hermitian SymmetricMatrix< DynamicMatrix< complex<double> > > D; // Only symmetric // Hermitian block matrices are not allowed HermitianMatrix< DynamicMatrix< DynamicVector<double> > > E; // Compilation error! SymmetricMatrix< DynamicMatrix< DynamicVector<double> > > F; // Symmetric block matrix \endcode // \n \section adaptors_hermitian_matrices_special_properties Special Properties of Hermitian Matrices // <hr> // // A Hermitian matrix is used exactly like a matrix of the underlying, adapted matrix type \c MT. // It also provides (nearly) the same interface as the underlying matrix type. However, there are // some important exceptions resulting from the Hermitian symmetry constraint: // // -# <b>\ref adaptors_hermitian_matrices_square</b> // -# <b>\ref adaptors_hermitian_matrices_hermitian</b> // -# <b>\ref adaptors_hermitian_matrices_initialization</b> // // \n \subsection adaptors_hermitian_matrices_square Hermitian Matrices Must Always be Square! // // In case a resizable matrix is used (as for instance blaze::HybridMatrix, blaze::DynamicMatrix, // or blaze::CompressedMatrix), this means that the according constructors, the \c resize() and // the \c extend() functions only expect a single parameter, which specifies both the number of // rows and columns, instead of two (one for the number of rows and one for the number of columns): \code using blaze::DynamicMatrix; using blaze::HermitianMatrix; using blaze::rowMajor; // Default constructed, default initialized, row-major 3x3 Hermitian dynamic matrix HermitianMatrix< DynamicMatrix<std::complex<double>,rowMajor> > A( 3 ); // Resizing the matrix to 5x5 A.resize( 5 ); // Extending the number of rows and columns by 2, resulting in a 7x7 matrix A.extend( 2 ); \endcode // In case a matrix with a fixed size is used (as for instance blaze::StaticMatrix), the number // of rows and number of columns must be specified equally: \code using blaze::StaticMatrix; using blaze::HermitianMatrix; using blaze::columnMajor; // Correct setup of a fixed size column-major 3x3 Hermitian static matrix HermitianMatrix< StaticMatrix<std::complex<float>,3UL,3UL,columnMajor> > A; // Compilation error: the provided matrix type is not a square matrix type HermitianMatrix< StaticMatrix<std::complex<float>,3UL,4UL,columnMajor> > B; \endcode // \n \subsection adaptors_hermitian_matrices_hermitian The Hermitian Property is Always Enforced! // // This means that the following properties of a Hermitian matrix are always guaranteed: // // - The diagonal elements are real numbers, i.e. the imaginary part is zero // - Element \f$ a_{ij} \f$ is always the complex conjugate of element \f$ a_{ji} \f$ // // Thus modifying the element \f$ a_{ij} \f$ of a Hermitian matrix also modifies its // counterpart element \f$ a_{ji} \f$. 
Also, it is only possible to assign matrices that // are Hermitian themselves: \code using blaze::CompressedMatrix; using blaze::DynamicMatrix; using blaze::StaticMatrix; using blaze::HermitianMatrix; using blaze::rowMajor; typedef std::complex<double> cplx; // Default constructed, row-major 3x3 Hermitian compressed matrix HermitianMatrix< CompressedMatrix<cplx,rowMajor> > A( 3 ); // Initializing the matrix via the function call operator // // ( (1, 0) (0,0) (2,1) ) // ( (0, 0) (0,0) (0,0) ) // ( (2,-1) (0,0) (0,0) ) // A(0,0) = cplx( 1.0, 0.0 ); // Initialization of the diagonal element (0,0) A(0,2) = cplx( 2.0, 1.0 ); // Initialization of the elements (0,2) and (2,0) // Inserting three more elements via the insert() function // // ( (1, 0) (0,0) (2, 1) ) // ( (0, 0) (2,0) (4,-2) ) // ( (2,-1) (4,2) (0, 0) ) // A.insert( 1, 1, cplx( 2.0, 0.0 ) ); // Inserting the diagonal element (1,1) A.insert( 1, 2, cplx( 4.0, -2.0 ) ); // Inserting the elements (1,2) and (2,1) // Access via a non-const iterator // // ( (1, 0) (8,1) (2, 1) ) // ( (8,-1) (2,0) (4,-2) ) // ( (2,-1) (4,2) (0, 0) ) // *A.begin(1UL) = cplx( 8.0, -1.0 ); // Modifies both elements (1,0) and (0,1) // Erasing elements via the erase() function // // ( (0, 0) (8,1) (0, 0) ) // ( (8,-1) (2,0) (4,-2) ) // ( (0, 0) (4,2) (0, 0) ) // A.erase( 0, 0 ); // Erasing the diagonal element (0,0) A.erase( 0, 2 ); // Erasing the elements (0,2) and (2,0) // Construction from a Hermitian dense matrix StaticMatrix<cplx,3UL,3UL> B{ { cplx( 3.0, 0.0 ), cplx( 8.0, 2.0 ), cplx( -2.0, 2.0 ) }, { cplx( 8.0, -2.0 ), cplx( 0.0, 0.0 ), cplx( -1.0, -1.0 ) }, { cplx( -2.0, -2.0 ), cplx( -1.0, 1.0 ), cplx( 4.0, 0.0 ) } }; HermitianMatrix< DynamicMatrix<cplx,rowMajor> > C( B ); // OK // Assignment of a non-Hermitian dense matrix StaticMatrix<cplx,3UL,3UL> D{ { cplx( 3.0, 0.0 ), cplx( 7.0, 2.0 ), cplx( 3.0, 2.0 ) }, { cplx( 8.0, 1.0 ), cplx( 0.0, 0.0 ), cplx( 6.0, 4.0 ) }, { cplx( -2.0, 2.0 ), cplx( -1.0, 1.0 ), cplx( 4.0, 0.0 ) } }; C = D; // Throws an exception; Hermitian invariant would be violated! \endcode // The same restriction also applies to the \c append() function for sparse matrices: Appending // the element \f$ a_{ij} \f$ additionally inserts the element \f$ a_{ji} \f$ into the matrix. // Despite the additional insertion, the \c append() function still provides the most efficient // way to set up a Hermitian sparse matrix.
In order to achieve the maximum efficiency, the // capacity of the individual rows/columns of the matrix should be specifically prepared with // \c reserve() calls: \code using blaze::CompressedMatrix; using blaze::HermitianMatrix; using blaze::rowMajor; typedef std::complex<double> cplx; // Setup of the Hermitian matrix // // ( (0, 0) (1,2) (3,-4) ) // A = ( (1,-2) (2,0) (0, 0) ) // ( (3, 4) (0,0) (0, 0) ) // HermitianMatrix< CompressedMatrix<cplx,rowMajor> > A( 3 ); A.reserve( 5 ); // Reserving enough space for 5 non-zero elements A.reserve( 0, 2 ); // Reserving two non-zero elements in the first row A.reserve( 1, 2 ); // Reserving two non-zero elements in the second row A.reserve( 2, 1 ); // Reserving a single non-zero element in the third row A.append( 0, 1, cplx( 1.0, 2.0 ) ); // Appending an element at position (0,1) and (1,0) A.append( 1, 1, cplx( 2.0, 0.0 ) ); // Appending an element at position (1,1) A.append( 2, 0, cplx( 3.0, 4.0 ) ); // Appending an element at position (2,0) and (0,2) \endcode // The Hermitian property is also enforced for Hermitian custom matrices: In case the given array // of elements does not represent a Hermitian matrix, a \c std::invalid_argument exception is // thrown: \code using blaze::CustomMatrix; using blaze::HermitianMatrix; using blaze::unaligned; using blaze::unpadded; using blaze::rowMajor; typedef HermitianMatrix< CustomMatrix<double,unaligned,unpadded,rowMajor> > CustomHermitian; // Creating a 3x3 Hermitian custom matrix from a properly initialized array double array[9] = { 1.0, 2.0, 4.0, 2.0, 3.0, 5.0, 4.0, 5.0, 6.0 }; CustomHermitian A( array, 3UL ); // OK // Attempt to create a second 3x3 Hermitian custom matrix from an uninitialized array CustomHermitian B( new double[9UL], 3UL, blaze::ArrayDelete() ); // Throws an exception \endcode // Finally, the Hermitian property is enforced for views (rows, columns, submatrices, ...) on the // Hermitian matrix. The following example demonstrates that modifying the elements of an entire // row of the Hermitian matrix also affects the counterpart elements in the according column of // the matrix: \code using blaze::DynamicMatrix; using blaze::HermitianMatrix; typedef std::complex<double> cplx; // Setup of the Hermitian matrix // // ( (0, 0) (1,-1) (0,0) (2, 1) ) // A = ( (1, 1) (3, 0) (4,2) (0, 0) ) // ( (0, 0) (4,-2) (0,0) (5,-3) ) // ( (2,-1) (0, 0) (5,3) (0, 0) ) // HermitianMatrix< DynamicMatrix<cplx> > A( 4 ); A(0,1) = cplx( 1.0, -1.0 ); A(0,3) = cplx( 2.0, 1.0 ); A(1,1) = cplx( 3.0, 0.0 ); A(1,2) = cplx( 4.0, 2.0 ); A(2,3) = cplx( 5.0, 3.0 ); // Setting all elements in the 1st row to 0 results in the matrix // // ( (0, 0) (0,0) (0,0) (2, 1) ) // A = ( (0, 0) (0,0) (0,0) (0, 0) ) // ( (0, 0) (0,0) (0,0) (5,-3) ) // ( (2,-1) (0,0) (5,3) (0, 0) ) // row( A, 1 ) = cplx( 0.0, 0.0 ); \endcode // The next example demonstrates the (compound) assignment to submatrices of Hermitian matrices. // Since the modification of element \f$ a_{ij} \f$ of a Hermitian matrix also modifies the // element \f$ a_{ji} \f$, the matrix to be assigned must be structured such that the Hermitian // symmetry of the matrix is preserved.
Otherwise a \c std::invalid_argument exception is thrown: \code using blaze::DynamicMatrix; using blaze::HermitianMatrix; typedef std::complex<double> cplx; // Setup of two default 4x4 Hermitian matrices HermitianMatrix< DynamicMatrix<cplx> > A1( 4 ), A2( 4 ); // Setup of the 3x2 dynamic matrix // // ( (1,-1) (2, 5) ) // B = ( (3, 0) (4,-6) ) // ( (5, 0) (6, 0) ) // DynamicMatrix<cplx> B( 3UL, 2UL ); B(0,0) = cplx( 1.0, -1.0 ); B(0,1) = cplx( 2.0, 5.0 ); B(1,0) = cplx( 3.0, 0.0 ); B(1,1) = cplx( 4.0, -6.0 ); B(2,0) = cplx( 5.0, 0.0 ); B(2,1) = cplx( 6.0, 0.0 ); // OK: Assigning B to a submatrix of A1 such that the Hermitian property is preserved // // ( (0, 0) (0, 0) (1,-1) (2, 5) ) // A1 = ( (0, 0) (0, 0) (3, 0) (4,-6) ) // ( (1, 1) (3, 0) (5, 0) (6, 0) ) // ( (2,-5) (4, 6) (6, 0) (0, 0) ) // submatrix( A1, 0UL, 2UL, 3UL, 2UL ) = B; // OK // Error: Assigning B to a submatrix of A2 such that the Hermitian property isn't preserved! // The elements marked with X cannot be assigned unambiguously! // // ( (0, 0) (1,-1) (2,5) (0,0) ) // A2 = ( (1, 1) (3, 0) (X,X) (0,0) ) // ( (2,-5) (X, X) (6,0) (0,0) ) // ( (0, 0) (0, 0) (0,0) (0,0) ) // submatrix( A2, 0UL, 1UL, 3UL, 2UL ) = B; // Assignment throws an exception! \endcode // \n \subsection adaptors_hermitian_matrices_initialization The Elements of a Dense Hermitian Matrix are Always Default Initialized! // // Although this results in a small loss of efficiency (especially in case all default values are // overridden afterwards), this property is important since otherwise the Hermitian property of // dense Hermitian matrices could not be guaranteed: \code using blaze::DynamicMatrix; using blaze::HermitianMatrix; using blaze::rowMajor; // Uninitialized, 5x5 row-major dynamic matrix DynamicMatrix<int,rowMajor> A( 5, 5 ); // Default initialized, 5x5 row-major Hermitian dynamic matrix HermitianMatrix< DynamicMatrix<int,rowMajor> > B( 5 ); \endcode // \n \section adaptors_hermitian_matrices_arithmetic_operations Arithmetic Operations // <hr> // // A HermitianMatrix can be used within all numerical operations in any way any other dense or // sparse matrix can be used. It can also be combined with any other dense or sparse vector or // matrix.
The following code example gives an impression of the use of HermitianMatrix within // arithmetic operations: \code using blaze::HermitianMatrix; using blaze::DynamicMatrix; using blaze::HybridMatrix; using blaze::StaticMatrix; using blaze::CompressedMatrix; using blaze::rowMajor; using blaze::columnMajor; typedef std::complex<float> cplx; DynamicMatrix<cplx,rowMajor> A( 3, 3 ); CompressedMatrix<cplx,rowMajor> B( 3, 3 ); HermitianMatrix< DynamicMatrix<cplx,rowMajor> > C( 3 ); HermitianMatrix< CompressedMatrix<cplx,rowMajor> > D( 3 ); HermitianMatrix< HybridMatrix<cplx,3UL,3UL,rowMajor> > E; HermitianMatrix< StaticMatrix<cplx,3UL,3UL,columnMajor> > F; E = A + B; // Matrix addition and assignment to a row-major Hermitian matrix F = C - D; // Matrix subtraction and assignment to a column-major Hermitian matrix F = A * D; // Matrix multiplication between a dense and a sparse matrix C *= 2.0; // In-place scaling of matrix C E = 2.0 * B; // Scaling of matrix B F = C * 2.0; // Scaling of matrix C E += A - B; // Addition assignment F -= C + D; // Subtraction assignment F *= A * D; // Multiplication assignment \endcode // \n \section adaptors_hermitian_matrices_performance Performance Considerations // <hr> // // When the Hermitian property of a matrix is known beforehand, using the HermitianMatrix adaptor // instead of a general matrix can be a considerable performance advantage. This is particularly // true in case the Hermitian matrix is also symmetric (i.e. has built-in element types). The // \b Blaze library tries to exploit the properties of Hermitian (symmetric) matrices whenever // possible. However, there are also situations when using a Hermitian matrix introduces some // overhead. The following examples demonstrate several situations where Hermitian matrices can // positively or negatively impact performance. // // \n \subsection adaptors_hermitian_matrices_matrix_matrix_multiplication Positive Impact: Matrix/Matrix Multiplication // // When multiplying two matrices, at least one of which is symmetric, \b Blaze can exploit the fact // that \f$ A = A^T \f$ and choose the fastest and most suited combination of storage orders for the // multiplication. The following example demonstrates this by means of a dense matrix/sparse matrix // multiplication: \code using blaze::DynamicMatrix; using blaze::CompressedMatrix; using blaze::HermitianMatrix; using blaze::rowMajor; using blaze::columnMajor; HermitianMatrix< DynamicMatrix<double,rowMajor> > A; // Both Hermitian and symmetric HermitianMatrix< CompressedMatrix<double,columnMajor> > B; // Both Hermitian and symmetric DynamicMatrix<double,columnMajor> C; // ... Resizing and initialization C = A * B; \endcode // Intuitively, the chosen combination of a row-major and a column-major matrix is the most suited // for maximum performance. However, \b Blaze evaluates the multiplication as \code C = A * trans( B ); \endcode // which significantly increases the performance since in contrast to the original formulation the // optimized form can be vectorized. Therefore, in the context of matrix multiplications, using a // symmetric matrix is obviously an advantage.
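// The legitimacy of this rewrite is easy to confirm (a minimal, hypothetical sketch): a Hermitian
// matrix with real element type compares equal to its own transpose, so both formulations denote
// the same mathematical product:
\code
blaze::HermitianMatrix< blaze::DynamicMatrix<double> > S( 100UL );

// ... Initialization of S

assert( S == trans( S ) );  // Hermitian matrices of real element type are also symmetric
\endcode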
// // \n \subsection adaptors_hermitian_matrices_matrix_vector_multiplication Positive Impact: Matrix/Vector Multiplication // // A similar optimization is possible in case of matrix/vector multiplications: \code using blaze::DynamicMatrix; using blaze::DynamicVector; using blaze::CompressedVector; using blaze::HermitianMatrix; using blaze::rowMajor; using blaze::columnVector; HermitianMatrix< DynamicMatrix<double,rowMajor> > A; // Hermitian and symmetric CompressedVector<double,columnVector> x; DynamicVector<double,columnVector> y; // ... Resizing and initialization y = A * x; \endcode // In this example it is not intuitively apparent that using a row-major matrix is not the best // possible choice in terms of performance since the computation cannot be vectorized. Choosing // a column-major matrix instead, however, would enable a vectorized computation. Therefore // \b Blaze exploits the fact that \c A is symmetric, selects the best suited storage order and // evaluates the multiplication as \code y = trans( A ) * x; \endcode // which also significantly increases the performance. // // \n \subsection adaptors_hermitian_matrices_views Positive Impact: Row/Column Views on Column/Row-Major Matrices // // Another example is the optimization of a row view on a column-major symmetric matrix: \code using blaze::DynamicMatrix; using blaze::HermitianMatrix; using blaze::Row; using blaze::rowMajor; using blaze::columnMajor; typedef HermitianMatrix< DynamicMatrix<double,columnMajor> > DynamicHermitian; DynamicHermitian A( 10UL ); // Both Hermitian and symmetric Row<DynamicHermitian> row5 = row( A, 5UL ); \endcode // Usually, a row view on a column-major matrix results in a considerable performance decrease in // comparison to a row view on a row-major matrix due to the non-contiguous storage of the matrix // elements. However, in case of symmetric matrices, \b Blaze instead uses the according column of // the matrix, which provides the same performance as if the matrix were row-major. Note that // this also works for column views on row-major matrices, where \b Blaze can use the according // row instead of a column in order to provide maximum performance. // // \n \subsection adaptors_hermitian_matrices_assignment Negative Impact: Assignment of a General Matrix // // In contrast to using a Hermitian matrix on the right-hand side of an assignment (i.e. for read // access), which introduces absolutely no performance penalty, using a Hermitian matrix on the // left-hand side of an assignment (i.e. for write access) may introduce additional overhead when // it is assigned a general matrix, which is not Hermitian at compile time: \code using blaze::DynamicMatrix; using blaze::HermitianMatrix; HermitianMatrix< DynamicMatrix< std::complex<double> > > A, C; DynamicMatrix< std::complex<double> > B; B = A; // Only read-access to the Hermitian matrix; no performance penalty C = A; // Assignment of a Hermitian matrix to another Hermitian matrix; no runtime overhead C = B; // Assignment of a general matrix to a Hermitian matrix; some runtime overhead \endcode // When assigning a general, potentially not Hermitian matrix to a Hermitian matrix it is necessary // to check whether the matrix is Hermitian at runtime in order to guarantee the Hermitian property // of the Hermitian matrix. In case it turns out to be Hermitian, it is assigned as efficiently as // possible; if it is not, an exception is thrown.
In order to prevent this runtime overhead it is // therefore generally advisable to assign Hermitian matrices to other Hermitian matrices.\n // In this context it is especially noteworthy that in contrast to additions and subtractions the // multiplication of two Hermitian matrices does not necessarily result in another Hermitian matrix: \code HermitianMatrix< DynamicMatrix<double> > A, B, C; C = A + B; // Results in a Hermitian matrix; no runtime overhead C = A - B; // Results in a Hermitian matrix; no runtime overhead C = A * B; // Is not guaranteed to result in a Hermitian matrix; some runtime overhead \endcode // \n Previous: \ref adaptors_symmetric_matrices &nbsp; &nbsp; Next: \ref adaptors_triangular_matrices */ //************************************************************************************************* //**Triangular Matrices**************************************************************************** /*!\page adaptors_triangular_matrices Triangular Matrices // // \tableofcontents // // // \n \section adaptors_triangular_matrices_general Triangular Matrices // <hr> // // Triangular matrices come in three flavors: Lower triangular matrices provide the compile time // guarantee to be square matrices and that the upper part of the matrix contains only default // elements that cannot be modified. Upper triangular matrices on the other hand provide the // compile time guarantee to be square and that the lower part of the matrix contains only fixed // default elements. Finally, diagonal matrices provide the compile time guarantee to be square // and that both the lower and upper part of the matrix contain only immutable default elements. // These properties can be exploited to gain higher performance and/or to save memory. Within the // \b Blaze library, several kinds of lower and upper triangular and diagonal matrices are realized // by the following class templates: // // Lower triangular matrices: // - <b>\ref adaptors_triangular_matrices_lowermatrix</b> // - <b>\ref adaptors_triangular_matrices_unilowermatrix</b> // - <b>\ref adaptors_triangular_matrices_strictlylowermatrix</b> // // Upper triangular matrices: // - <b>\ref adaptors_triangular_matrices_uppermatrix</b> // - <b>\ref adaptors_triangular_matrices_uniuppermatrix</b> // - <b>\ref adaptors_triangular_matrices_strictlyuppermatrix</b> // // Diagonal matrices // - <b>\ref adaptors_triangular_matrices_diagonalmatrix</b> // // // \n \section adaptors_triangular_matrices_lowermatrix LowerMatrix // <hr> // // The blaze::LowerMatrix class template is an adapter for existing dense and sparse matrix types. // It inherits the properties and the interface of the given matrix type \c MT and extends it by // enforcing the additional invariant that all matrix elements above the diagonal are 0 (lower // triangular matrix): \f[\left(\begin{array}{*{5}{c}} l_{0,0} & 0 & 0 & \cdots & 0 \\ l_{1,0} & l_{1,1} & 0 & \cdots & 0 \\ l_{2,0} & l_{2,1} & l_{2,2} & \cdots & 0 \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ l_{N,0} & l_{N,1} & l_{N,2} & \cdots & l_{N,N} \\ \end{array}\right).\f] // It can be included via the header file \code #include <blaze/math/LowerMatrix.h> \endcode // The type of the adapted matrix can be specified via the first template parameter: \code template< typename MT > class LowerMatrix; \endcode // \c MT specifies the type of the matrix to be adapted. blaze::LowerMatrix can be used with any // non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. 
Note // that the given matrix type must be either resizable (as for instance blaze::HybridMatrix or // blaze::DynamicMatrix) or must be square at compile time (as for instance blaze::StaticMatrix). // // The following examples give an impression of several possible lower matrices: \code using blaze::unaligned; using blaze::unpadded; using blaze::rowMajor; using blaze::columnMajor; // Definition of a 3x3 row-major dense lower matrix with static memory blaze::LowerMatrix< blaze::StaticMatrix<int,3UL,3UL,rowMajor> > A; // Definition of a resizable column-major dense lower matrix based on HybridMatrix blaze::LowerMatrix< blaze::HybridMatrix<float,4UL,4UL,columnMajor> > B; // Definition of a resizable row-major dense lower matrix based on DynamicMatrix blaze::LowerMatrix< blaze::DynamicMatrix<double,rowMajor> > C; // Definition of a fixed size row-major dense lower matrix based on CustomMatrix blaze::LowerMatrix< blaze::CustomMatrix<double,unaligned,unpadded,rowMajor> > D; // Definition of a compressed row-major single precision lower matrix blaze::LowerMatrix< blaze::CompressedMatrix<float,rowMajor> > E; \endcode // The storage order of a lower matrix depends on the storage order of the adapted matrix // type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. is specified // as blaze::rowMajor), the lower matrix will also be a row-major matrix. Otherwise, if the // adapted matrix is column-major (i.e. is specified as blaze::columnMajor), the lower matrix // will also be a column-major matrix. // // // \n \section adaptors_triangular_matrices_unilowermatrix UniLowerMatrix // <hr> // // The blaze::UniLowerMatrix class template is an adapter for existing dense and sparse matrix // types. It inherits the properties and the interface of the given matrix type \c MT and extends // it by enforcing the additional invariant that all diagonal matrix elements are 1 and all matrix // elements above the diagonal are 0 (lower unitriangular matrix): \f[\left(\begin{array}{*{5}{c}} 1 & 0 & 0 & \cdots & 0 \\ l_{1,0} & 1 & 0 & \cdots & 0 \\ l_{2,0} & l_{2,1} & 1 & \cdots & 0 \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ l_{N,0} & l_{N,1} & l_{N,2} & \cdots & 1 \\ \end{array}\right).\f] // It can be included via the header file \code #include <blaze/math/UniLowerMatrix.h> \endcode // The type of the adapted matrix can be specified via the first template parameter: \code template< typename MT > class UniLowerMatrix; \endcode // \c MT specifies the type of the matrix to be adapted. blaze::UniLowerMatrix can be used with any // non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. Also, // the given matrix type must have numeric element types (i.e. all integral types except \c bool, // floating point and complex types). Note that the given matrix type must be either resizable (as // for instance blaze::HybridMatrix or blaze::DynamicMatrix) or must be square at compile time (as // for instance blaze::StaticMatrix).
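// The practical consequence of the unitriangular invariant (a brief, hypothetical sketch for
// illustration; see \ref adaptors_triangular_matrices_triangular for the general rules): the
// diagonal of a unilower matrix reads as 1 directly after construction and cannot be modified:
\code
blaze::UniLowerMatrix< blaze::DynamicMatrix<double> > L( 3UL );

assert( L(0,0) == 1.0 && L(1,1) == 1.0 && L(2,2) == 1.0 );  // The diagonal is fixed to 1

L(2,0) = 5.0;  // OK: modification of a lower element
L(1,1) = 2.0;  // Throws a std::invalid_argument exception; diagonal elements are immutable
\endcode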
// // The following examples give an impression of several possible lower unitriangular matrices: \code // Definition of a 3x3 row-major dense unilower matrix with static memory blaze::UniLowerMatrix< blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A; // Definition of a resizable column-major dense unilower matrix based on HybridMatrix blaze::UniLowerMatrix< blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> > B; // Definition of a resizable row-major dense unilower matrix based on DynamicMatrix blaze::UniLowerMatrix< blaze::DynamicMatrix<double,blaze::rowMajor> > C; // Definition of a compressed row-major single precision unilower matrix blaze::UniLowerMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > D; \endcode // The storage order of a lower unitriangular matrix depends on the storage order of the // adapted matrix type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. // is specified as blaze::rowMajor), the unilower matrix will also be a row-major matrix. // Otherwise, if the adapted matrix is column-major (i.e. is specified as blaze::columnMajor), // the unilower matrix will also be a column-major matrix. // // // \n \section adaptors_triangular_matrices_strictlylowermatrix StrictlyLowerMatrix // <hr> // // The blaze::StrictlyLowerMatrix class template is an adapter for existing dense and sparse matrix // types. It inherits the properties and the interface of the given matrix type \c MT and extends // it by enforcing the additional invariant that all diagonal matrix elements and all matrix // elements above the diagonal are 0 (strictly lower triangular matrix): \f[\left(\begin{array}{*{5}{c}} 0 & 0 & 0 & \cdots & 0 \\ l_{1,0} & 0 & 0 & \cdots & 0 \\ l_{2,0} & l_{2,1} & 0 & \cdots & 0 \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ l_{N,0} & l_{N,1} & l_{N,2} & \cdots & 0 \\ \end{array}\right).\f] // It can be included via the header file \code #include <blaze/math/StrictlyLowerMatrix.h> \endcode // The type of the adapted matrix can be specified via the first template parameter: \code template< typename MT > class StrictlyLowerMatrix; \endcode // \c MT specifies the type of the matrix to be adapted. blaze::StrictlyLowerMatrix can be used // with any non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix // type. Note that the given matrix type must be either resizable (as for instance // blaze::HybridMatrix or blaze::DynamicMatrix) or must be square at compile time (as for instance // blaze::StaticMatrix). // // The following examples give an impression of several possible strictly lower triangular matrices: \code // Definition of a 3x3 row-major dense strictly lower matrix with static memory blaze::StrictlyLowerMatrix< blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A; // Definition of a resizable column-major dense strictly lower matrix based on HybridMatrix blaze::StrictlyLowerMatrix< blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> > B; // Definition of a resizable row-major dense strictly lower matrix based on DynamicMatrix blaze::StrictlyLowerMatrix< blaze::DynamicMatrix<double,blaze::rowMajor> > C; // Definition of a compressed row-major single precision strictly lower matrix blaze::StrictlyLowerMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > D; \endcode // The storage order of a strictly lower triangular matrix depends on the storage order of // the adapted matrix type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e.
// // is specified as blaze::rowMajor), the strictly lower matrix will also be a row-major matrix. // Otherwise, if the adapted matrix is column-major (i.e. is specified as blaze::columnMajor), // the strictly lower matrix will also be a column-major matrix. // // // \n \section adaptors_triangular_matrices_uppermatrix UpperMatrix // <hr> // // The blaze::UpperMatrix class template is an adapter for existing dense and sparse matrix types. // It inherits the properties and the interface of the given matrix type \c MT and extends it by // enforcing the additional invariant that all matrix elements below the diagonal are 0 (upper // triangular matrix): \f[\left(\begin{array}{*{5}{c}} u_{0,0} & u_{0,1} & u_{0,2} & \cdots & u_{0,N} \\ 0 & u_{1,1} & u_{1,2} & \cdots & u_{1,N} \\ 0 & 0 & u_{2,2} & \cdots & u_{2,N} \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ 0 & 0 & 0 & \cdots & u_{N,N} \\ \end{array}\right).\f] // It can be included via the header file \code #include <blaze/math/UpperMatrix.h> \endcode // The type of the adapted matrix can be specified via the first template parameter: \code template< typename MT > class UpperMatrix; \endcode // \c MT specifies the type of the matrix to be adapted. blaze::UpperMatrix can be used with any // non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. Note // that the given matrix type must be either resizable (as for instance blaze::HybridMatrix or // blaze::DynamicMatrix) or must be square at compile time (as for instance blaze::StaticMatrix). // // The following examples give an impression of several possible upper matrices: \code // Definition of a 3x3 row-major dense upper matrix with static memory blaze::UpperMatrix< blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A; // Definition of a resizable column-major dense upper matrix based on HybridMatrix blaze::UpperMatrix< blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> > B; // Definition of a resizable row-major dense upper matrix based on DynamicMatrix blaze::UpperMatrix< blaze::DynamicMatrix<double,blaze::rowMajor> > C; // Definition of a compressed row-major single precision upper matrix blaze::UpperMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > D; \endcode // The storage order of an upper matrix depends on the storage order of the adapted matrix // type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. is specified // as blaze::rowMajor), the upper matrix will also be a row-major matrix. Otherwise, if the // adapted matrix is column-major (i.e. is specified as blaze::columnMajor), the upper matrix // will also be a column-major matrix. // // // \n \section adaptors_triangular_matrices_uniuppermatrix UniUpperMatrix // <hr> // // The blaze::UniUpperMatrix class template is an adapter for existing dense and sparse matrix // types.
It inherits the properties and the interface of the given matrix type \c MT and extends // it by enforcing the additional invariant that all diagonal matrix elements are 1 and all matrix // elements below the diagonal are 0 (upper unitriangular matrix): \f[\left(\begin{array}{*{5}{c}} 1 & u_{0,1} & u_{0,2} & \cdots & u_{0,N} \\ 0 & 1 & u_{1,2} & \cdots & u_{1,N} \\ 0 & 0 & 1 & \cdots & u_{2,N} \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ 0 & 0 & 0 & \cdots & 1 \\ \end{array}\right).\f] // It can be included via the header file \code #include <blaze/math/UniUpperMatrix.h> \endcode // The type of the adapted matrix can be specified via the first template parameter: \code template< typename MT > class UniUpperMatrix; \endcode // \c MT specifies the type of the matrix to be adapted. blaze::UniUpperMatrix can be used with any // non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. Also, // the given matrix type must have numeric element types (i.e. all integral types except \c bool, // floating point and complex types). Note that the given matrix type must be either resizable (as // for instance blaze::HybridMatrix or blaze::DynamicMatrix) or must be square at compile time (as // for instance blaze::StaticMatrix). // // The following examples give an impression of several possible upper unitriangular matrices: \code // Definition of a 3x3 row-major dense uniupper matrix with static memory blaze::UniUpperMatrix< blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A; // Definition of a resizable column-major dense uniupper matrix based on HybridMatrix blaze::UniUpperMatrix< blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> > B; // Definition of a resizable row-major dense uniupper matrix based on DynamicMatrix blaze::UniUpperMatrix< blaze::DynamicMatrix<double,blaze::rowMajor> > C; // Definition of a compressed row-major single precision uniupper matrix blaze::UniUpperMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > D; \endcode // The storage order of an upper unitriangular matrix depends on the storage order of the // adapted matrix type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. // is specified as blaze::rowMajor), the uniupper matrix will also be a row-major matrix. // Otherwise, if the adapted matrix is column-major (i.e. is specified as blaze::columnMajor), // the uniupper matrix will also be a column-major matrix. // // // \n \section adaptors_triangular_matrices_strictlyuppermatrix StrictlyUpperMatrix // <hr> // // The blaze::StrictlyUpperMatrix class template is an adapter for existing dense and sparse matrix // types. It inherits the properties and the interface of the given matrix type \c MT and extends // it by enforcing the additional invariant that all diagonal matrix elements and all matrix // elements below the diagonal are 0 (strictly upper triangular matrix): \f[\left(\begin{array}{*{5}{c}} 0 & u_{0,1} & u_{0,2} & \cdots & u_{0,N} \\ 0 & 0 & u_{1,2} & \cdots & u_{1,N} \\ 0 & 0 & 0 & \cdots & u_{2,N} \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ 0 & 0 & 0 & \cdots & 0 \\ \end{array}\right).\f] // It can be included via the header file \code #include <blaze/math/StrictlyUpperMatrix.h> \endcode // The type of the adapted matrix can be specified via the first template parameter: \code template< typename MT > class StrictlyUpperMatrix; \endcode // \c MT specifies the type of the matrix to be adapted.
blaze::StrictlyUpperMatrix can be used // with any non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix // type. Note that the given matrix type must be either resizable (as for instance // blaze::HybridMatrix or blaze::DynamicMatrix) or must be square at compile time (as for instance // blaze::StaticMatrix). // // The following examples give an impression of several possible strictly upper triangular matrices: \code // Definition of a 3x3 row-major dense strictly upper matrix with static memory blaze::StrictlyUpperMatrix< blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A; // Definition of a resizable column-major dense strictly upper matrix based on HybridMatrix blaze::StrictlyUpperMatrix< blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> > B; // Definition of a resizable row-major dense strictly upper matrix based on DynamicMatrix blaze::StrictlyUpperMatrix< blaze::DynamicMatrix<double,blaze::rowMajor> > C; // Definition of a compressed row-major single precision strictly upper matrix blaze::StrictlyUpperMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > D; \endcode // The storage order of a strictly upper triangular matrix depends on the storage order of // the adapted matrix type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. // is specified as blaze::rowMajor), the strictly upper matrix will also be a row-major matrix. // Otherwise, if the adapted matrix is column-major (i.e. is specified as blaze::columnMajor), // the strictly upper matrix will also be a column-major matrix. // // // \n \section adaptors_triangular_matrices_diagonalmatrix DiagonalMatrix // <hr> // // The blaze::DiagonalMatrix class template is an adapter for existing dense and sparse matrix // types. It inherits the properties and the interface of the given matrix type \c MT and extends // it by enforcing the additional invariant that all matrix elements above and below the diagonal // are 0 (diagonal matrix): \f[\left(\begin{array}{*{5}{c}} l_{0,0} & 0 & 0 & \cdots & 0 \\ 0 & l_{1,1} & 0 & \cdots & 0 \\ 0 & 0 & l_{2,2} & \cdots & 0 \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ 0 & 0 & 0 & \cdots & l_{N,N} \\ \end{array}\right).\f] // It can be included via the header file \code #include <blaze/math/DiagonalMatrix.h> \endcode // The type of the adapted matrix can be specified via the first template parameter: \code template< typename MT > class DiagonalMatrix; \endcode // \c MT specifies the type of the matrix to be adapted. blaze::DiagonalMatrix can be used with any // non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. Note // that the given matrix type must be either resizable (as for instance blaze::HybridMatrix or // blaze::DynamicMatrix) or must be square at compile time (as for instance blaze::StaticMatrix).
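// Analogous to the other adaptors, the diagonal invariant is enforced at runtime (a short,
// hypothetical sketch for illustration):
\code
blaze::DiagonalMatrix< blaze::DynamicMatrix<double> > D( 3UL );

D(1,1) = 4.0;  // OK: modification of a diagonal element
D(0,2) = 1.0;  // Throws a std::invalid_argument exception; off-diagonal elements are fixed to 0
\endcode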
// // The following examples give an impression of several possible diagonal matrices: \code // Definition of a 3x3 row-major dense diagonal matrix with static memory blaze::DiagonalMatrix< blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A; // Definition of a resizable column-major dense diagonal matrix based on HybridMatrix blaze::DiagonalMatrix< blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> > B; // Definition of a resizable row-major dense diagonal matrix based on DynamicMatrix blaze::DiagonalMatrix< blaze::DynamicMatrix<double,blaze::rowMajor> > C; // Definition of a compressed row-major single precision diagonal matrix blaze::DiagonalMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > D; \endcode // The storage order of a diagonal matrix depends on the storage order of the adapted matrix // type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. is specified // as blaze::rowMajor), the diagonal matrix will also be a row-major matrix. Otherwise, if the // adapted matrix is column-major (i.e. is specified as blaze::columnMajor), the diagonal matrix // will also be a column-major matrix. // // // \n \section adaptors_triangular_matrices_special_properties Special Properties of Triangular Matrices // <hr> // // A triangular matrix is used exactly like a matrix of the underlying, adapted matrix type \c MT. // It also provides (nearly) the same interface as the underlying matrix type. However, there are // some important exceptions resulting from the triangular matrix constraint: // // -# <b>\ref adaptors_triangular_matrices_square</b> // -# <b>\ref adaptors_triangular_matrices_triangular</b> // -# <b>\ref adaptors_triangular_matrices_initialization</b> // -# <b>\ref adaptors_triangular_matrices_storage</b> // -# <b>\ref adaptors_triangular_matrices_scaling</b> // // \n \subsection adaptors_triangular_matrices_square Triangular Matrices Must Always be Square! // // In case a resizable matrix is used (as for instance blaze::HybridMatrix, blaze::DynamicMatrix, // or blaze::CompressedMatrix), this means that the according constructors, the \c resize() and // the \c extend() functions only expect a single parameter, which specifies both the number of // rows and columns, instead of two (one for the number of rows and one for the number of columns): \code using blaze::DynamicMatrix; using blaze::LowerMatrix; using blaze::rowMajor; // Default constructed, default initialized, row-major 3x3 lower dynamic matrix LowerMatrix< DynamicMatrix<double,rowMajor> > A( 3 ); // Resizing the matrix to 5x5 A.resize( 5 ); // Extending the number of rows and columns by 2, resulting in a 7x7 matrix A.extend( 2 ); \endcode // In case a matrix with a fixed size is used (as for instance blaze::StaticMatrix), the number // of rows and number of columns must be specified equally: \code using blaze::StaticMatrix; using blaze::LowerMatrix; using blaze::columnMajor; // Correct setup of a fixed size column-major 3x3 lower static matrix LowerMatrix< StaticMatrix<int,3UL,3UL,columnMajor> > A; // Compilation error: the provided matrix type is not a square matrix type LowerMatrix< StaticMatrix<int,3UL,4UL,columnMajor> > B; \endcode // \n \subsection adaptors_triangular_matrices_triangular The Triangular Property is Always Enforced! // // This means that it is only allowed to modify elements in the lower part or the diagonal of // a lower triangular matrix and in the upper part or the diagonal of an upper triangular matrix.
// Unitriangular and strictly triangular matrices are even more restrictive and don't allow the // modification of diagonal elements. Also, triangular matrices can only be assigned matrices that // don't violate their triangular property. The following example demonstrates this restriction // by means of the blaze::LowerMatrix adaptor. For examples with other triangular matrix types // see the according class documentations. \code using blaze::CompressedMatrix; using blaze::DynamicMatrix; using blaze::StaticMatrix; using blaze::LowerMatrix; using blaze::rowMajor; typedef LowerMatrix< CompressedMatrix<double,rowMajor> > CompressedLower; // Default constructed, row-major 3x3 lower compressed matrix CompressedLower A( 3 ); // Initializing elements via the function call operator A(0,0) = 1.0; // Initialization of the diagonal element (0,0) A(2,0) = 2.0; // Initialization of the lower element (2,0) A(1,2) = 9.0; // Throws an exception; invalid modification of upper element // Inserting two more elements via the insert() function A.insert( 1, 0, 3.0 ); // Inserting the lower element (1,0) A.insert( 2, 1, 4.0 ); // Inserting the lower element (2,1) A.insert( 0, 2, 9.0 ); // Throws an exception; invalid insertion of upper element // Appending an element via the append() function A.reserve( 1, 3 ); // Reserving enough capacity in row 1 A.append( 1, 1, 5.0 ); // Appending the diagonal element (1,1) A.append( 1, 2, 9.0 ); // Throws an exception; appending an element in the upper part // Access via a non-const iterator CompressedLower::Iterator it = A.begin(1); *it = 6.0; // Modifies the lower element (1,0) ++it; *it = 9.0; // Modifies the diagonal element (1,1) // Erasing elements via the erase() function A.erase( 0, 0 ); // Erasing the diagonal element (0,0) A.erase( 2, 0 ); // Erasing the lower element (2,0) // Construction from a lower dense matrix StaticMatrix<double,3UL,3UL> B{ { 3.0, 0.0, 0.0 }, { 8.0, 0.0, 0.0 }, { -2.0, -1.0, 4.0 } }; LowerMatrix< DynamicMatrix<double,rowMajor> > C( B ); // OK // Assignment of a non-lower dense matrix StaticMatrix<double,3UL,3UL> D{ { 3.0, 0.0, -2.0 }, { 8.0, 0.0, 0.0 }, { -2.0, -1.0, 4.0 } }; C = D; // Throws an exception; lower matrix invariant would be violated! \endcode // The triangular property is also enforced during the construction of triangular custom matrices: // In case the given array of elements does not represent the according triangular matrix type, a // \c std::invalid_argument exception is thrown: \code using blaze::CustomMatrix; using blaze::LowerMatrix; using blaze::unaligned; using blaze::unpadded; using blaze::rowMajor; typedef LowerMatrix< CustomMatrix<double,unaligned,unpadded,rowMajor> > CustomLower; // Creating a 3x3 lower custom matrix from a properly initialized array double array[9] = { 1.0, 0.0, 0.0, 2.0, 3.0, 0.0, 4.0, 5.0, 6.0 }; CustomLower A( array, 3UL ); // OK // Attempt to create a second 3x3 lower custom matrix from an uninitialized array CustomLower B( new double[9UL], 3UL, blaze::ArrayDelete() ); // Throws an exception \endcode // Finally, the triangular matrix property is enforced for views (rows, columns, submatrices, ...) // on the triangular matrix. The following example demonstrates that modifying the elements of an // entire row and submatrix of a lower matrix only affects the lower and diagonal matrix elements. // Again, this example uses blaze::LowerMatrix; for examples with other triangular matrix types // see the according class documentations.
\code using blaze::DynamicMatrix; using blaze::LowerMatrix; // Setup of the lower matrix // // ( 0 0 0 0 ) // A = ( 1 2 0 0 ) // ( 0 3 0 0 ) // ( 4 0 5 0 ) // LowerMatrix< DynamicMatrix<int> > A( 4 ); A(1,0) = 1; A(1,1) = 2; A(2,1) = 3; A(3,0) = 4; A(3,2) = 5; // Setting the lower and diagonal elements in the 2nd row to 9 results in the matrix // // ( 0 0 0 0 ) // A = ( 1 2 0 0 ) // ( 9 9 9 0 ) // ( 4 0 5 0 ) // row( A, 2 ) = 9; // Setting the lower and diagonal elements in the 1st and 2nd column to 7 results in // // ( 0 0 0 0 ) // A = ( 1 7 0 0 ) // ( 9 7 7 0 ) // ( 4 7 7 0 ) // submatrix( A, 0, 1, 4, 2 ) = 7; \endcode // The next example demonstrates the (compound) assignment to rows/columns and submatrices of // triangular matrices. Since only lower/upper and potentially diagonal elements may be modified, // the matrix to be assigned must be structured such that the triangular matrix invariant of the // matrix is preserved. Otherwise a \c std::invalid_argument exception is thrown: \code using blaze::DynamicMatrix; using blaze::DynamicVector; using blaze::LowerMatrix; using blaze::rowVector; // Setup of two default 4x4 lower matrices LowerMatrix< DynamicMatrix<int> > A1( 4 ), A2( 4 ); // Setup of a 4-dimensional vector // // v = ( 1 2 3 0 ) // DynamicVector<int,rowVector> v{ 1, 2, 3, 0 }; // OK: Assigning v to the 2nd row of A1 preserves the lower matrix invariant // // ( 0 0 0 0 ) // A1 = ( 0 0 0 0 ) // ( 1 2 3 0 ) // ( 0 0 0 0 ) // row( A1, 2 ) = v; // OK // Error: Assigning v to the 1st row of A1 violates the lower matrix invariant! The element // marked with X cannot be assigned and triggers an exception. // // ( 0 0 0 0 ) // A1 = ( 1 2 X 0 ) // ( 1 2 3 0 ) // ( 0 0 0 0 ) // row( A1, 1 ) = v; // Assignment throws an exception! // Setup of the 3x2 dynamic matrix // // ( 0 0 ) // B = ( 7 0 ) // ( 8 9 ) // DynamicMatrix<int> B( 3UL, 2UL, 0 ); B(1,0) = 7; B(2,0) = 8; B(2,1) = 9; // OK: Assigning B to a submatrix of A2 such that the lower matrix invariant can be preserved // // ( 0 0 0 0 ) // A2 = ( 0 7 0 0 ) // ( 0 8 9 0 ) // ( 0 0 0 0 ) // submatrix( A2, 0UL, 1UL, 3UL, 2UL ) = B; // OK // Error: Assigning B to a submatrix of A2 such that the lower matrix invariant cannot be // preserved! The elements marked with X cannot be assigned without violating the invariant! // // ( 0 0 0 0 ) // A2 = ( 0 0 X 0 ) // ( 0 0 8 X ) // ( 0 0 0 0 ) // submatrix( A2, 0UL, 2UL, 3UL, 2UL ) = B; // Assignment throws an exception! \endcode // \n \subsection adaptors_triangular_matrices_initialization The Elements of a Dense Triangular Matrix are Always Default Initialized! // // Although this results in a small loss of efficiency during the creation of a dense lower or // upper matrix, this initialization is important since otherwise the lower/upper matrix property // of dense lower and upper matrices could not be guaranteed: \code using blaze::DynamicMatrix; using blaze::LowerMatrix; using blaze::UpperMatrix; using blaze::DiagonalMatrix; using blaze::rowMajor; using blaze::columnMajor; // Uninitialized, 5x5 row-major dynamic matrix DynamicMatrix<int,rowMajor> A( 5, 5 ); // 5x5 row-major lower dynamic matrix with default initialized upper matrix LowerMatrix< DynamicMatrix<int,rowMajor> > B( 5 ); // 7x7 column-major upper dynamic matrix with default initialized lower matrix UpperMatrix< DynamicMatrix<int,columnMajor> > C( 7 ); // 3x3 row-major diagonal dynamic matrix with default initialized lower and upper matrix DiagonalMatrix< DynamicMatrix<int,rowMajor> > D( 3 ); \endcode // \n \subsection adaptors_triangular_matrices_storage Dense Triangular Matrices Store All Elements!
// // All dense triangular matrices store all \f$ N \times N \f$ elements, including the immutable // elements in the lower or upper part, respectively. Therefore dense triangular matrices don't // provide any kind of memory reduction! There are two main reasons for this: First, also storing // the zero elements guarantees maximum performance for many algorithms that perform vectorized // operations on the triangular matrices, which is especially true for small dense matrices. // Second, conceptually all triangular adaptors merely restrict the interface to the matrix type // \c MT and do not change the data layout or the underlying matrix type. // // This property matters most for diagonal matrices. In order to achieve the perfect combination // of performance and memory consumption for a diagonal matrix, it is recommended to use dense // matrices for small diagonal matrices and sparse matrices for large diagonal matrices: \code // Recommendation 1: use dense matrices for small diagonal matrices typedef blaze::DiagonalMatrix< blaze::StaticMatrix<float,3UL,3UL> > SmallDiagonalMatrix; // Recommendation 2: use sparse matrices for large diagonal matrices typedef blaze::DiagonalMatrix< blaze::CompressedMatrix<float> > LargeDiagonalMatrix; \endcode // \n \subsection adaptors_triangular_matrices_scaling Unitriangular Matrices Cannot Be Scaled! // // Since the diagonal elements of a unitriangular matrix have a fixed value of 1, it is not possible // to self-scale such a matrix: \code using blaze::DynamicMatrix; using blaze::UniLowerMatrix; UniLowerMatrix< DynamicMatrix<int> > A( 4 ); A *= 2; // Compilation error; Scale operation is not available on a unilower matrix A /= 2; // Compilation error; Scale operation is not available on a unilower matrix A.scale( 2 ); // Compilation error; Scale function is not available on a unilower matrix A = A * 2; // Throws an exception; Invalid assignment of non-unilower matrix A = A / 2; // Throws an exception; Invalid assignment of non-unilower matrix \endcode // \n \section adaptors_triangular_matrices_arithmetic_operations Arithmetic Operations // <hr> // // A lower and upper triangular matrix can participate in numerical operations in any way any other // dense or sparse matrix can. It can also be combined with any other dense or sparse // vector or matrix.
The following code example gives an impression of the use of blaze::LowerMatrix // and blaze::UpperMatrix within arithmetic operations: \code using blaze::LowerMatrix; using blaze::UpperMatrix; using blaze::DynamicMatrix; using blaze::HybridMatrix; using blaze::StaticMatrix; using blaze::CompressedMatrix; using blaze::rowMajor; using blaze::columnMajor; DynamicMatrix<double,rowMajor> A( 3, 3 ); CompressedMatrix<double,rowMajor> B( 3, 3 ); LowerMatrix< DynamicMatrix<double,rowMajor> > C( 3 ); UpperMatrix< CompressedMatrix<double,rowMajor> > D( 3 ); LowerMatrix< HybridMatrix<float,3UL,3UL,rowMajor> > E; UpperMatrix< StaticMatrix<float,3UL,3UL,columnMajor> > F; E = A + B; // Matrix addition and assignment to a row-major lower matrix F = C - D; // Matrix subtraction and assignment to a column-major upper matrix F = A * D; // Matrix multiplication between a dense and a sparse matrix C *= 2.0; // In-place scaling of matrix C E = 2.0 * B; // Scaling of matrix B F = C * 2.0; // Scaling of matrix C E += A - B; // Addition assignment F -= C + D; // Subtraction assignment F *= A * D; // Multiplication assignment \endcode // Note that diagonal, unitriangular and strictly triangular matrix types can be used in the same // way, but may pose some additional restrictions (see the according class documentations). // // // \n \section adaptors_triangular_matrices_block_matrices Triangular Block Matrices // <hr> // // It is also possible to use triangular block matrices: \code using blaze::CompressedMatrix; using blaze::DynamicMatrix; using blaze::StaticMatrix; using blaze::LowerMatrix; using blaze::UpperMatrix; // Definition of a 5x5 lower block matrix based on DynamicMatrix LowerMatrix< DynamicMatrix< StaticMatrix<int,3UL,3UL> > > A( 5 ); // Definition of a 7x7 upper block matrix based on CompressedMatrix UpperMatrix< CompressedMatrix< StaticMatrix<int,3UL,3UL> > > B( 7 ); \endcode // Also in this case the triangular matrix invariant is enforced, i.e. it is not possible to // manipulate elements in the upper part (lower triangular matrix) or the lower part (upper // triangular matrix) of the matrix: \code const StaticMatrix<int,3UL,3UL> C{ { 1, -4, 5 }, { 6, 8, -3 }, { 2, -1, 2 } }; A(2,4)(1,1) = -5; // Invalid manipulation of upper matrix element; Results in an exception B.insert( 4, 2, C ); // Invalid insertion of the element (4,2); Results in an exception \endcode // Note that unitriangular matrices are restricted to numeric element types and therefore cannot // be used for block matrices: \code using blaze::CompressedMatrix; using blaze::DynamicMatrix; using blaze::StaticMatrix; using blaze::UniLowerMatrix; using blaze::UniUpperMatrix; // Compilation error: lower unitriangular matrices are restricted to numeric element types UniLowerMatrix< DynamicMatrix< StaticMatrix<int,3UL,3UL> > > A( 5 ); // Compilation error: upper unitriangular matrices are restricted to numeric element types UniUpperMatrix< CompressedMatrix< StaticMatrix<int,3UL,3UL> > > B( 7 ); \endcode // For more information on block matrices, see the tutorial on \ref block_vectors_and_matrices. // // // \n \section adaptors_triangular_matrices_performance Performance Considerations // <hr> // // The \b Blaze library tries to exploit the properties of lower and upper triangular matrices // whenever and wherever possible. Therefore using triangular matrices instead of general // matrices can result in a considerable performance improvement. However, there are also // situations when using a triangular matrix introduces some overhead.
The following examples // demonstrate several common situations where triangular matrices can positively or negatively // impact performance. // // \n \subsection adaptors_triangular_matrices_matrix_matrix_multiplication Positive Impact: Matrix/Matrix Multiplication // // When multiplying two matrices, at least one of which is triangular, \b Blaze can exploit the // fact that either the lower or upper part of the matrix contains only default elements and // restrict the algorithm to the non-zero elements. The following example demonstrates this by // means of a dense matrix/dense matrix multiplication with lower triangular matrices: \code using blaze::DynamicMatrix; using blaze::LowerMatrix; using blaze::rowMajor; using blaze::columnMajor; LowerMatrix< DynamicMatrix<double,rowMajor> > A; LowerMatrix< DynamicMatrix<double,columnMajor> > B; DynamicMatrix<double,columnMajor> C; // ... Resizing and initialization C = A * B; \endcode // In comparison to a general matrix multiplication, the performance advantage is significant, // especially for large matrices. Therefore it is highly recommended to use the blaze::LowerMatrix // and blaze::UpperMatrix adaptors when a matrix is known to be lower or upper triangular, // respectively. Note however that the performance advantage is most pronounced for dense matrices // and much less so for sparse matrices. // // \n \subsection adaptors_triangular_matrices_matrix_vector_multiplication Positive Impact: Matrix/Vector Multiplication // // A similar performance improvement can be gained when using a triangular matrix in a matrix/vector // multiplication: \code using blaze::DynamicMatrix; using blaze::DynamicVector; using blaze::LowerMatrix; using blaze::rowMajor; using blaze::columnVector; LowerMatrix< DynamicMatrix<double,rowMajor> > A; DynamicVector<double,columnVector> x, y; // ... Resizing and initialization y = A * x; \endcode // In this example, \b Blaze also exploits the structure of the matrix and approximately halves the // runtime of the multiplication. Also in case of matrix/vector multiplications the performance // improvement is most pronounced for dense matrices and much less so for sparse matrices. // // \n \subsection adaptors_triangular_matrices_assignment Negative Impact: Assignment of a General Matrix // // In contrast to using a triangular matrix on the right-hand side of an assignment (i.e. for // read access), which introduces absolutely no performance penalty, using a triangular matrix // on the left-hand side of an assignment (i.e. for write access) may introduce additional // overhead when it is assigned a general matrix, which is not triangular at compile time: \code using blaze::DynamicMatrix; using blaze::LowerMatrix; LowerMatrix< DynamicMatrix<double> > A, C; DynamicMatrix<double> B; B = A; // Only read-access to the lower matrix; no performance penalty C = A; // Assignment of a lower matrix to another lower matrix; no runtime overhead C = B; // Assignment of a general matrix to a lower matrix; some runtime overhead \endcode // When assigning a general (potentially not lower triangular) matrix to a lower matrix or a // general (potentially not upper triangular) matrix to an upper matrix it is necessary to check // whether the matrix is lower or upper at runtime in order to guarantee the triangular property // of the matrix. In case it turns out to be lower or upper, respectively, it is assigned as // efficiently as possible; if it is not, an exception is thrown.
In order to prevent this runtime // overhead it is therefore generally advisable to assign lower or upper triangular matrices to // other lower or upper triangular matrices.\n // In this context it is especially noteworthy that the addition, subtraction, and multiplication // of two triangular matrices of the same structure always results in another triangular matrix: \code LowerMatrix< DynamicMatrix<double> > A, B, C; C = A + B; // Results in a lower matrix; no runtime overhead C = A - B; // Results in a lower matrix; no runtime overhead C = A * B; // Results in a lower matrix; no runtime overhead \endcode \code UpperMatrix< DynamicMatrix<double> > A, B, C; C = A + B; // Results in an upper matrix; no runtime overhead C = A - B; // Results in an upper matrix; no runtime overhead C = A * B; // Results in an upper matrix; no runtime overhead \endcode // \n Previous: \ref adaptors_hermitian_matrices &nbsp; &nbsp; Next: \ref views */ //************************************************************************************************* //**Views****************************************************************************************** /*!\page views Views // // \tableofcontents // // // \section views_general General Concepts // <hr> // // Views represent parts of a vector or matrix, such as a subvector, a submatrix, or a specific // row or column of a matrix. As such, views act as a reference to a specific part of a vector // or matrix. This reference is valid and can be used in every way any other vector or matrix // can be used as long as the referenced vector or matrix is not resized or entirely destroyed. // Views also act as aliases to the elements of the vector or matrix: Changes made to the elements // (e.g. modifying values, inserting or erasing elements) via the view are immediately visible in // the vector or matrix and changes made via the vector or matrix are immediately visible in the // view. // // The \b Blaze library provides the following views on vectors and matrices: // // Vector views: // - \ref views_subvectors // // Matrix views: // - \ref views_submatrices // - \ref views_rows // - \ref views_columns // // // \n \section views_examples Examples \code using blaze::DynamicMatrix; using blaze::StaticVector; using blaze::rowVector; // Setup of the 3x5 row-major matrix // // ( 1 0 -2 3 0 ) // ( 0 2 5 -1 -1 ) // ( 1 0 0 2 1 ) // DynamicMatrix<int> A{ { 1, 0, -2, 3, 0 }, { 0, 2, 5, -1, -1 }, { 1, 0, 0, 2, 1 } }; // Setup of the 2-dimensional row vector // // ( 18 19 ) // StaticVector<int,2UL,rowVector> vec{ 18, 19 }; // Assigning to the elements (1,2) and (1,3) via a subvector of a row // // ( 1 0 -2 3 0 ) // ( 0 2 18 19 -1 ) // ( 1 0 0 2 1 ) // subvector( row( A, 1UL ), 2UL, 2UL ) = vec; \endcode // \n Previous: \ref adaptors_triangular_matrices &nbsp; &nbsp; Next: \ref views_subvectors */ //************************************************************************************************* //**Subvectors************************************************************************************* /*!\page views_subvectors Subvectors // // \tableofcontents // // // Subvectors provide views on a specific part of a dense or sparse vector. As such, subvectors // act as a reference to a specific range within a vector. This reference is valid and can be // used in every way any other dense or sparse vector can be used as long as the vector containing // the subvector is not resized or entirely destroyed. The subvector also acts as an alias to the // vector elements in the specified range: Changes made to the elements (e.g.
modifying values, // inserting or erasing elements) are immediately visible in the vector and changes made via the // vector are immediately visible in the subvector. // // // \n \section views_subvectors_class The Subvector Class Template // <hr> // // The blaze::Subvector class template represents a view on a specific subvector of a dense or // sparse vector primitive. It can be included via the header file \code #include <blaze/math/Subvector.h> \endcode // The type of the vector is specified via two template parameters: \code template< typename VT, bool AF > class Subvector; \endcode // - \c VT: specifies the type of the vector primitive. Subvector can be used with every vector // primitive or view, but does not work with any vector expression type. // - \c AF: the alignment flag specifies whether the subvector is aligned (blaze::aligned) or // unaligned (blaze::unaligned). The default value is blaze::unaligned. // // // \n \section views_subvectors_setup Setup of Subvectors // <hr> // // A view on a dense or sparse subvector can be created very conveniently via the \c subvector() // function. This view can be treated as any other vector, i.e. it can be assigned to, it can // be copied from, and it can be used in arithmetic operations. A subvector created from a row // vector can be used as any other row vector, a subvector created from a column vector can be // used as any other column vector. The view can also be used on both sides of an assignment: // The subvector can either be used as an alias to grant write access to a specific subvector // of a vector primitive on the left-hand side of an assignment or to grant read-access to a // specific subvector of a vector primitive or expression on the right-hand side of an assignment. // The following example demonstrates this in detail: \code typedef blaze::DynamicVector<double,blaze::rowVector> DenseVectorType; typedef blaze::CompressedVector<int,blaze::rowVector> SparseVectorType; DenseVectorType d1, d2; SparseVectorType s1, s2; // ... Resizing and initialization // Creating a view on the first ten elements of the dense vector d1 blaze::Subvector<DenseVectorType> dsv = subvector( d1, 0UL, 10UL ); // Creating a view on the second ten elements of the sparse vector s1 blaze::Subvector<SparseVectorType> ssv = subvector( s1, 10UL, 10UL ); // Creating a view on the addition of d2 and s2 dsv = subvector( d2 + s2, 5UL, 10UL ); // Creating a view on the multiplication of d2 and s2 ssv = subvector( d2 * s2, 2UL, 10UL ); \endcode // The \c subvector() function can be used on any dense or sparse vector, including expressions, // as demonstrated in the example. Note however that a blaze::Subvector can only be instantiated // with a dense or sparse vector primitive, i.e. with types that can be written, and not with an // expression type. // // // \n \section views_subvectors_common_operations Common Operations // <hr> // // A subvector view can be used like any other dense or sparse vector. For instance, the current // number of elements can be obtained via the \c size() function, the current capacity via the // \c capacity() function, and the number of non-zero elements via the \c nonZeros() function. // However, since subvectors are references to a specific range of a vector, several operations // are not possible on views, such as resizing and swapping. 
The following example shows this by // means of a dense subvector view: \code typedef blaze::DynamicVector<int,blaze::rowVector> VectorType; typedef blaze::Subvector<VectorType> SubvectorType; VectorType v( 42UL ); // ... Resizing and initialization // Creating a view on the range [5..14] of vector v SubvectorType sv = subvector( v, 5UL, 10UL ); sv.size(); // Returns the number of elements in the subvector sv.capacity(); // Returns the capacity of the subvector sv.nonZeros(); // Returns the number of non-zero elements contained in the subvector sv.resize( 84UL ); // Compilation error: Cannot resize a subvector of a vector SubvectorType sv2 = subvector( v, 15UL, 10UL ); swap( sv, sv2 ); // Compilation error: Swap operation not allowed \endcode // \n \section views_subvectors_element_access Element Access // <hr> // // The elements of a subvector can be directly accessed via the subscript operator: \code typedef blaze::DynamicVector<double,blaze::rowVector> VectorType; VectorType v; // ... Resizing and initialization // Creating an 8-dimensional subvector, starting from index 4 blaze::Subvector<VectorType> sv = subvector( v, 4UL, 8UL ); // Setting the 1st element of the subvector, which corresponds to // the element at index 5 in vector v sv[1] = 2.0; \endcode \code typedef blaze::CompressedVector<double,blaze::rowVector> VectorType; VectorType v; // ... Resizing and initialization // Creating an 8-dimensional subvector, starting from index 4 blaze::Subvector<VectorType> sv = subvector( v, 4UL, 8UL ); // Setting the 1st element of the subvector, which corresponds to // the element at index 5 in vector v sv[1] = 2.0; \endcode // The numbering of the subvector elements is \f[\left(\begin{array}{*{5}{c}} 0 & 1 & 2 & \cdots & N-1 \\ \end{array}\right),\f] // where N is the specified size of the subvector. Alternatively, the elements of a subvector can // be traversed via iterators. Just as with vectors, in case of non-const subvectors, \c begin() // and \c end() return an Iterator, which allows a manipulation of the non-zero values; in case // of constant subvectors a ConstIterator is returned: \code typedef blaze::DynamicVector<int,blaze::rowVector> VectorType; typedef blaze::Subvector<VectorType> SubvectorType; VectorType v( 256UL ); // ... Resizing and initialization // Creating a reference to a specific subvector of the dense vector v SubvectorType sv = subvector( v, 16UL, 64UL ); for( SubvectorType::Iterator it=sv.begin(); it!=sv.end(); ++it ) { *it = ...; // OK: Write access to the dense subvector value. ... = *it; // OK: Read access to the dense subvector value. } for( SubvectorType::ConstIterator it=sv.begin(); it!=sv.end(); ++it ) { *it = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid. ... = *it; // OK: Read access to the dense subvector value. } \endcode \code typedef blaze::CompressedVector<int,blaze::rowVector> VectorType; typedef blaze::Subvector<VectorType> SubvectorType; VectorType v( 256UL ); // ... Resizing and initialization // Creating a reference to a specific subvector of the sparse vector v SubvectorType sv = subvector( v, 16UL, 64UL ); for( SubvectorType::Iterator it=sv.begin(); it!=sv.end(); ++it ) { it->value() = ...; // OK: Write access to the value of the non-zero element. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element.
} for( SubvectorType::ConstIterator it=sv.begin(); it!=sv.end(); ++it ) { it->value() = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element. } \endcode // \n \section views_subvectors_element_insertion Element Insertion // <hr> // // Inserting/accessing elements in a sparse subvector can be done by several alternative functions. // The following example demonstrates all options: \code typedef blaze::CompressedVector<double,blaze::rowVector> VectorType; VectorType v( 256UL ); // Non-initialized vector of size 256 typedef blaze::Subvector<VectorType> SubvectorType; SubvectorType sv( subvector( v, 10UL, 60UL ) ); // View on the range [10..69] of v // The subscript operator provides access to all possible elements of the sparse subvector, // including the zero elements. In case the subscript operator is used to access an element // that is currently not stored in the sparse subvector, the element is inserted into the // subvector. sv[42] = 2.0; // The second operation for inserting elements is the set() function. In case the element // is not contained in the subvector it is inserted into the subvector; if it is already contained // in the subvector its value is modified. sv.set( 45UL, -1.2 ); // An alternative for inserting elements into the subvector is the insert() function. However, // it inserts the element only in case the element is not already contained in the subvector. sv.insert( 50UL, 3.7 ); // Just as in case of vectors, elements can also be inserted via the append() function. In // case of subvectors, append() also requires that the appended element's index is strictly // larger than the currently largest non-zero index of the subvector and that the subvector's // capacity is large enough to hold the new element. Note however that due to the nature of // a subvector, which may be an alias to the middle of a sparse vector, the append() function // does not work as efficiently for a subvector as it does for a vector. sv.reserve( 10UL ); sv.append( 51UL, -2.1 ); \endcode // \n \section views_subvectors_arithmetic_operations Arithmetic Operations // <hr> // // Both dense and sparse subvectors can be used in all arithmetic operations that any other dense // or sparse vector can be used in. The following example gives an impression of the use of dense // subvectors within arithmetic operations. All operations (addition, subtraction, multiplication, // scaling, ...) can be performed on all possible combinations of dense and sparse subvectors with // fitting element types: \code typedef blaze::DynamicVector<double,blaze::rowVector> DenseVectorType; typedef blaze::CompressedVector<double,blaze::rowVector> SparseVectorType; DenseVectorType d1, d2, d3; SparseVectorType s1, s2; // ...
Resizing and initialization typedef blaze::DynamicMatrix<double,blaze::rowMajor> DenseMatrixType; DenseMatrixType A; typedef blaze::Subvector<DenseVectorType> SubvectorType; SubvectorType dsv( subvector( d1, 0UL, 10UL ) ); // View on the range [0..9] of vector d1 dsv = d2; // Dense vector initialization of the range [0..9] subvector( d1, 10UL, 10UL ) = s1; // Sparse vector initialization of the range [10..19] d3 = dsv + d2; // Dense vector/dense vector addition s2 = s1 + subvector( d1, 10UL, 10UL ); // Sparse vector/dense vector addition d2 = dsv * subvector( d1, 20UL, 10UL ); // Component-wise vector multiplication subvector( d1, 3UL, 4UL ) *= 2.0; // In-place scaling of the range [3..6] d2 = subvector( d1, 7UL, 3UL ) * 2.0; // Scaling of the range [7..9] d2 = 2.0 * subvector( d1, 7UL, 3UL ); // Scaling of the range [7..9] subvector( d1, 0UL , 10UL ) += d2; // Addition assignment subvector( d1, 10UL, 10UL ) -= s2; // Subtraction assignment subvector( d1, 20UL, 10UL ) *= dsv; // Multiplication assignment double scalar = subvector( d1, 5UL, 10UL ) * trans( s1 ); // Scalar/dot/inner product between two vectors A = trans( s1 ) * subvector( d1, 4UL, 16UL ); // Outer product between two vectors \endcode // \n \section views_aligned_subvectors Aligned Subvectors // <hr> // // Usually subvectors can be defined anywhere within a vector. They may start at any position and // may have an arbitrary size (only restricted by the size of the underlying vector). However, in // contrast to vectors themselves, which are always properly aligned in memory and therefore can // provide maximum performance, this means that subvectors in general have to be considered to be // unaligned. This can be made explicit by the blaze::unaligned flag: \code using blaze::unaligned; typedef blaze::DynamicVector<double,blaze::rowVector> DenseVectorType; DenseVectorType x; // ... Resizing and initialization // Identical creations of an unaligned subvector in the range [8..23] blaze::Subvector<DenseVectorType> sv1 = subvector ( x, 8UL, 16UL ); blaze::Subvector<DenseVectorType> sv2 = subvector<unaligned>( x, 8UL, 16UL ); blaze::Subvector<DenseVectorType,unaligned> sv3 = subvector ( x, 8UL, 16UL ); blaze::Subvector<DenseVectorType,unaligned> sv4 = subvector<unaligned>( x, 8UL, 16UL ); \endcode // All of these calls to the \c subvector() function are identical. Whether the alignment flag is // explicitly specified or not, it always returns an unaligned subvector. Whereas this may provide // full flexibility in the creation of subvectors, this might result in performance disadvantages // in comparison to vector primitives (even in case the specified subvector could be aligned). // Whereas vector primitives are guaranteed to be properly aligned and therefore provide maximum // performance in all operations, a general view on a vector might not be properly aligned. This // may cause a performance penalty on some platforms and/or for some operations. // // However, it is also possible to create aligned subvectors. Aligned subvectors are identical to // unaligned subvectors in all aspects, except that they may pose additional alignment restrictions // and therefore have less flexibility during creation, but don't suffer from performance penalties // and provide the same performance as the underlying vector. 
Aligned subvectors are created by // explicitly specifying the blaze::aligned flag: \code using blaze::aligned; // Creating an aligned dense subvector in the range [8..23] blaze::Subvector<DenseVectorType,aligned> sv = subvector<aligned>( x, 8UL, 16UL ); \endcode // The alignment restrictions refer to system dependent address restrictions for the used element // type and the available vectorization mode (SSE, AVX, ...). In order to be properly aligned the // first element of the subvector must be aligned. The following source code gives some examples // for a double precision dynamic vector, assuming that AVX is available, which packs 4 \c double // values into a SIMD vector: \code using blaze::aligned; using blaze::columnVector; typedef blaze::DynamicVector<double,columnVector> VectorType; typedef blaze::Subvector<VectorType,aligned> SubvectorType; VectorType d( 17UL ); // ... Resizing and initialization // OK: Starts at the beginning, i.e. the first element is aligned SubvectorType dsv1 = subvector<aligned>( d, 0UL, 13UL ); // OK: Start index is a multiple of 4, i.e. the first element is aligned SubvectorType dsv2 = subvector<aligned>( d, 4UL, 7UL ); // OK: The start index is a multiple of 4 and the subvector includes the last element SubvectorType dsv3 = subvector<aligned>( d, 8UL, 9UL ); // Error: Start index is not a multiple of 4, i.e. the first element is not aligned SubvectorType dsv4 = subvector<aligned>( d, 5UL, 8UL ); \endcode // Note that the discussed alignment restrictions are only valid for aligned dense subvectors. // In contrast, aligned sparse subvectors at this time don't pose any additional restrictions. // Therefore aligned and unaligned sparse subvectors are truly fully identical. Still, in case // the blaze::aligned flag is specified during setup, an aligned subvector is created: \code using blaze::aligned; typedef blaze::CompressedVector<double,blaze::rowVector> SparseVectorType; SparseVectorType x; // ... Resizing and initialization // Creating an aligned subvector in the range [8..23] blaze::Subvector<SparseVectorType,aligned> sv = subvector<aligned>( x, 8UL, 16UL ); \endcode // \n \section views_subvectors_on_subvectors Subvectors on Subvectors // <hr> // // It is also possible to create a subvector view on another subvector. In this context it is // important to remember that the type returned by the \c subvector() function is the same type // as the type of the given subvector, not a nested subvector type, since the view on a subvector // is just another view on the underlying vector: \code typedef blaze::DynamicVector<double,blaze::rowVector> VectorType; typedef blaze::Subvector<VectorType> SubvectorType; VectorType d1; // ... Resizing and initialization // Creating a subvector view on the dense vector d1 SubvectorType sv1 = subvector( d1, 5UL, 10UL ); // Creating a subvector view on the dense subvector sv1 SubvectorType sv2 = subvector( sv1, 1UL, 5UL ); \endcode // \n Previous: \ref views &nbsp; &nbsp; Next: \ref views_submatrices */ //************************************************************************************************* //**Submatrices************************************************************************************ /*!\page views_submatrices Submatrices // // \tableofcontents // // // Submatrices provide views on a specific part of a dense or sparse matrix just as subvectors // provide views on specific parts of vectors. As such, submatrices act as a reference to a // specific block within a matrix. 
This reference is valid and can be used in every way any // other dense or sparse matrix can be used as long as the matrix containing the submatrix is // not resized or entirely destroyed. The submatrix also acts as an alias to the matrix elements // in the specified block: Changes made to the elements (e.g. modifying values, inserting or // erasing elements) are immediately visible in the matrix and changes made via the matrix are // immediately visible in the submatrix. // // // \n \section views_submatrices_class The Submatrix Class Template // <hr> // // The blaze::Submatrix class template represents a view on a specific submatrix of a dense or // sparse matrix primitive. It can be included via the header file \code #include <blaze/math/Submatrix.h> \endcode // The type of the matrix is specified via two template parameters: \code template< typename MT, bool AF > class Submatrix; \endcode // - \c MT: specifies the type of the matrix primitive. Submatrix can be used with every matrix // primitive, but does not work with any matrix expression type. // - \c AF: the alignment flag specifies whether the submatrix is aligned (blaze::aligned) or // unaligned (blaze::unaligned). The default value is blaze::unaligned. // // // \n \section views_submatrices_setup Setup of Submatrices // <hr> // // A view on a submatrix can be created very conveniently via the \c submatrix() function. // This view can be treated as any other matrix, i.e. it can be assigned to, it can be copied // from, and it can be used in arithmetic operations. A submatrix created from a row-major // matrix will itself be a row-major matrix, a submatrix created from a column-major matrix // will be a column-major matrix. The view can also be used on both sides of an assignment: // The submatrix can either be used as an alias to grant write access to a specific submatrix // of a matrix primitive on the left-hand side of an assignment or to grant read-access to // a specific submatrix of a matrix primitive or expression on the right-hand side of an // assignment. The following example demonstrates this in detail: \code typedef blaze::DynamicMatrix<double,blaze::rowMajor> DenseMatrixType; typedef blaze::CompressedMatrix<int,blaze::columnMajor> SparseMatrixType; DenseMatrixType D1, D2; SparseMatrixType S1, S2; // ... Resizing and initialization // Creating a view on the first 8x16 block of the dense matrix D1 blaze::Submatrix<DenseMatrixType> dsm = submatrix( D1, 0UL, 0UL, 8UL, 16UL ); // Creating a view on the second 8x16 block of the sparse matrix S1 blaze::Submatrix<SparseMatrixType> ssm = submatrix( S1, 0UL, 16UL, 8UL, 16UL ); // Creating a view on the addition of D2 and S2 dsm = submatrix( D2 + S2, 5UL, 10UL, 8UL, 16UL ); // Creating a view on the multiplication of D2 and S2 ssm = submatrix( D2 * S2, 7UL, 13UL, 8UL, 16UL ); \endcode // \n \section views_submatrices_common_operations Common Operations // <hr> // // The current size of the matrix, i.e. the number of rows or columns can be obtained via the // \c rows() and \c columns() functions, the current total capacity via the \c capacity() function, // and the number of non-zero elements via the \c nonZeros() function. However, since submatrices // are views on a specific submatrix of a matrix, several operations are not possible on views, // such as resizing and swapping: \code typedef blaze::DynamicMatrix<int,blaze::rowMajor> MatrixType; typedef blaze::Submatrix<MatrixType> SubmatrixType; MatrixType A; // ...
Resizing and initialization // Creating a view on an 8x12 submatrix of matrix A SubmatrixType sm = submatrix( A, 0UL, 0UL, 8UL, 12UL ); sm.rows(); // Returns the number of rows of the submatrix sm.columns(); // Returns the number of columns of the submatrix sm.capacity(); // Returns the capacity of the submatrix sm.nonZeros(); // Returns the number of non-zero elements contained in the submatrix sm.resize( 10UL, 8UL ); // Compilation error: Cannot resize a submatrix of a matrix SubmatrixType sm2 = submatrix( A, 8UL, 0UL, 12UL, 8UL ); swap( sm, sm2 ); // Compilation error: Swap operation not allowed \endcode // \n \section views_submatrices_element_access Element Access // <hr> // // The elements of a submatrix can be directly accessed with the function call operator: \code typedef blaze::DynamicMatrix<double,blaze::rowMajor> MatrixType; MatrixType A; // ... Resizing and initialization // Creating an 8x8 submatrix, starting from position (4,4) blaze::Submatrix<MatrixType> sm = submatrix( A, 4UL, 4UL, 8UL, 8UL ); // Setting the element (0,0) of the submatrix, which corresponds to // the element at position (4,4) in matrix A sm(0,0) = 2.0; \endcode \code typedef blaze::CompressedMatrix<double,blaze::rowMajor> MatrixType; MatrixType A; // ... Resizing and initialization // Creating an 8x8 submatrix, starting from position (4,4) blaze::Submatrix<MatrixType> sm = submatrix( A, 4UL, 4UL, 8UL, 8UL ); // Setting the element (0,0) of the submatrix, which corresponds to // the element at position (4,4) in matrix A sm(0,0) = 2.0; \endcode // Alternatively, the elements of a submatrix can be traversed via (const) iterators. Just as // with matrices, in case of non-const submatrices, \c begin() and \c end() return an Iterator, // which allows a manipulation of the non-zero values; in case of constant submatrices a // ConstIterator is returned: \code typedef blaze::DynamicMatrix<int,blaze::rowMajor> MatrixType; typedef blaze::Submatrix<MatrixType> SubmatrixType; MatrixType A( 256UL, 512UL ); // ... Resizing and initialization // Creating a reference to a specific submatrix of the dense matrix A SubmatrixType sm = submatrix( A, 16UL, 16UL, 64UL, 128UL ); // Traversing the elements of the 0th row via iterators to non-const elements for( SubmatrixType::Iterator it=sm.begin(0); it!=sm.end(0); ++it ) { *it = ...; // OK: Write access to the dense submatrix value. ... = *it; // OK: Read access to the dense submatrix value. } // Traversing the elements of the 1st row via iterators to const elements for( SubmatrixType::ConstIterator it=sm.begin(1); it!=sm.end(1); ++it ) { *it = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid. ... = *it; // OK: Read access to the dense submatrix value. } \endcode \code typedef blaze::CompressedMatrix<int,blaze::rowMajor> MatrixType; typedef blaze::Submatrix<MatrixType> SubmatrixType; MatrixType A( 256UL, 512UL ); // ... Resizing and initialization // Creating a reference to a specific submatrix of the sparse matrix A SubmatrixType sm = submatrix( A, 16UL, 16UL, 64UL, 128UL ); // Traversing the elements of the 0th row via iterators to non-const elements for( SubmatrixType::Iterator it=sm.begin(0); it!=sm.end(0); ++it ) { it->value() = ...; // OK: Write access to the value of the non-zero element. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element.
} // Traversing the elements of the 1st row via iterators to const elements for( SubmatrixType::ConstIterator it=sm.begin(1); it!=sm.end(1); ++it ) { it->value() = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element. } \endcode // \n \section views_submatrices_element_insertion Element Insertion // <hr> // // Inserting/accessing elements in a sparse submatrix can be done by several alternative functions. // The following example demonstrates all options: \code typedef blaze::CompressedMatrix<double,blaze::rowMajor> MatrixType; MatrixType A( 256UL, 512UL ); // Non-initialized matrix of size 256x512 typedef blaze::Submatrix<MatrixType> SubmatrixType; SubmatrixType sm = submatrix( A, 10UL, 10UL, 16UL, 16UL ); // View on a 16x16 submatrix of A // The function call operator provides access to all possible elements of the sparse submatrix, // including the zero elements. In case the function call operator is used to access an element // that is currently not stored in the sparse submatrix, the element is inserted into the // submatrix. sm(2,4) = 2.0; // The second operation for inserting elements is the set() function. In case the element is // not contained in the submatrix it is inserted into the submatrix; if it is already contained // in the submatrix its value is modified. sm.set( 2UL, 5UL, -1.2 ); // An alternative for inserting elements into the submatrix is the insert() function. However, // it inserts the element only in case the element is not already contained in the submatrix. sm.insert( 2UL, 6UL, 3.7 ); // Just as in case of sparse matrices, elements can also be inserted via the append() function. // In case of submatrices, append() also requires that the appended element's index is strictly // larger than the currently largest non-zero index in the according row or column of the // submatrix and that the according row's or column's capacity is large enough to hold the new // element. Note however that due to the nature of a submatrix, which may be an alias to the // middle of a sparse matrix, the append() function does not work as efficiently for a // submatrix as it does for a matrix. sm.reserve( 2UL, 10UL ); sm.append( 2UL, 10UL, -2.1 ); \endcode // \n \section views_submatrices_arithmetic_operations Arithmetic Operations // <hr> // // Both dense and sparse submatrices can be used in all arithmetic operations that any other dense // or sparse matrix can be used in. The following example gives an impression of the use of dense // submatrices within arithmetic operations. All operations (addition, subtraction, multiplication, // scaling, ...) can be performed on all possible combinations of dense and sparse matrices with // fitting element types: \code typedef blaze::DynamicMatrix<double,blaze::rowMajor> DenseMatrixType; typedef blaze::CompressedMatrix<double,blaze::rowMajor> SparseMatrixType; DenseMatrixType D1, D2, D3; SparseMatrixType S1, S2; typedef blaze::CompressedVector<double,blaze::columnVector> SparseVectorType; SparseVectorType a, b; // ...
Resizing and initialization typedef blaze::Submatrix<DenseMatrixType> SubmatrixType; SubmatrixType sm = submatrix( D1, 0UL, 0UL, 8UL, 8UL ); // View on the 8x8 submatrix of matrix D1 // starting from row 0 and column 0 submatrix( D1, 0UL, 8UL, 8UL, 8UL ) = D2; // Dense matrix initialization of the 8x8 submatrix // starting in row 0 and column 8 sm = S1; // Sparse matrix initialization of the second 8x8 submatrix D3 = sm + D2; // Dense matrix/dense matrix addition S2 = S1 - submatrix( D1, 8UL, 0UL, 8UL, 8UL ); // Sparse matrix/dense matrix subtraction D2 = sm * submatrix( D1, 8UL, 8UL, 8UL, 8UL ); // Dense matrix/dense matrix multiplication submatrix( D1, 8UL, 0UL, 8UL, 8UL ) *= 2.0; // In-place scaling of a submatrix of D1 D2 = submatrix( D1, 8UL, 8UL, 8UL, 8UL ) * 2.0; // Scaling of a submatrix of D1 D2 = 2.0 * sm; // Scaling of a submatrix of D1 submatrix( D1, 0UL, 8UL, 8UL, 8UL ) += D2; // Addition assignment submatrix( D1, 8UL, 0UL, 8UL, 8UL ) -= S1; // Subtraction assignment submatrix( D1, 8UL, 8UL, 8UL, 8UL ) *= sm; // Multiplication assignment a = submatrix( D1, 4UL, 4UL, 8UL, 8UL ) * b; // Dense matrix/sparse vector multiplication \endcode // \n \section views_aligned_submatrices Aligned Submatrices // <hr> // // Usually submatrices can be defined anywhere within a matrix. They may start at any position and // may have an arbitrary extension (only restricted by the extension of the underlying matrix). // However, in contrast to matrices themselves, which are always properly aligned in memory and // therefore can provide maximum performance, this means that submatrices in general have to be // considered to be unaligned. This can be made explicit by the blaze::unaligned flag: \code using blaze::unaligned; typedef blaze::DynamicMatrix<double,blaze::rowMajor> DenseMatrixType; DenseMatrixType A; // ... Resizing and initialization // Identical creations of an unaligned submatrix of size 8x8, starting in row 0 and column 0 blaze::Submatrix<DenseMatrixType> sm1 = submatrix ( A, 0UL, 0UL, 8UL, 8UL ); blaze::Submatrix<DenseMatrixType> sm2 = submatrix<unaligned>( A, 0UL, 0UL, 8UL, 8UL ); blaze::Submatrix<DenseMatrixType,unaligned> sm3 = submatrix ( A, 0UL, 0UL, 8UL, 8UL ); blaze::Submatrix<DenseMatrixType,unaligned> sm4 = submatrix<unaligned>( A, 0UL, 0UL, 8UL, 8UL ); \endcode // All of these calls to the \c submatrix() function are identical. Whether the alignment flag is // explicitly specified or not, it always returns an unaligned submatrix. Whereas this may provide // full flexibility in the creation of submatrices, this might result in performance disadvantages // in comparison to matrix primitives (even in case the specified submatrix could be aligned). // Whereas matrix primitives are guaranteed to be properly aligned and therefore provide maximum // performance in all operations, a general view on a matrix might not be properly aligned. This // may cause a performance penalty on some platforms and/or for some operations. // // However, it is also possible to create aligned submatrices. Aligned submatrices are identical to // unaligned submatrices in all aspects, except that they may pose additional alignment restrictions // and therefore have less flexibility during creation, but don't suffer from performance penalties // and provide the same performance as the underlying matrix.
Aligned submatrices are created by // explicitly specifying the blaze::aligned flag: \code using blaze::aligned; // Creating an aligned submatrix of size 8x8, starting in row 0 and column 0 blaze::Submatrix<DenseMatrixType,aligned> sm = submatrix<aligned>( A, 0UL, 0UL, 8UL, 8UL ); \endcode // The alignment restrictions refer to system dependent address restrictions for the used element // type and the available vectorization mode (SSE, AVX, ...). In order to be properly aligned the // first element of each row/column of the submatrix must be aligned. The following source code // gives some examples for a double precision row-major dynamic matrix, assuming that padding is // enabled and that AVX is available, which packs 4 \c double values into a SIMD vector: \code using blaze::aligned; using blaze::rowMajor; typedef blaze::DynamicMatrix<double,rowMajor> MatrixType; typedef blaze::Submatrix<MatrixType,aligned> SubmatrixType; MatrixType D( 13UL, 17UL ); // ... Resizing and initialization // OK: Starts at position (0,0), i.e. the first element of each row is aligned (due to padding) SubmatrixType dsm1 = submatrix<aligned>( D, 0UL, 0UL, 7UL, 11UL ); // OK: First column is a multiple of 4, i.e. the first element of each row is aligned (due to padding) SubmatrixType dsm2 = submatrix<aligned>( D, 3UL, 4UL, 8UL, 12UL ); // OK: First column is a multiple of 4 and the submatrix includes the last row and column SubmatrixType dsm3 = submatrix<aligned>( D, 4UL, 0UL, 9UL, 17UL ); // Error: First column is not a multiple of 4, i.e. the first element is not aligned SubmatrixType dsm4 = submatrix<aligned>( D, 2UL, 3UL, 8UL, 12UL ); \endcode // Note that the discussed alignment restrictions are only valid for aligned dense submatrices. // In contrast, aligned sparse submatrices at this time don't pose any additional restrictions. // Therefore aligned and unaligned sparse submatrices are truly fully identical. Still, in case // the blaze::aligned flag is specified during setup, an aligned submatrix is created: \code using blaze::aligned; typedef blaze::CompressedMatrix<double,blaze::rowMajor> SparseMatrixType; SparseMatrixType A; // ... Resizing and initialization // Creating an aligned submatrix of size 8x8, starting in row 0 and column 0 blaze::Submatrix<SparseMatrixType,aligned> sm = submatrix<aligned>( A, 0UL, 0UL, 8UL, 8UL ); \endcode // \n \section views_submatrices_on_submatrices Submatrices on Submatrices // <hr> // // It is also possible to create a submatrix view on another submatrix. In this context it is // important to remember that the type returned by the \c submatrix() function is the same type // as the type of the given submatrix, since the view on a submatrix is just another view on the // underlying matrix: \code typedef blaze::DynamicMatrix<double,blaze::rowMajor> MatrixType; typedef blaze::Submatrix<MatrixType> SubmatrixType; MatrixType D1; // ...
Resizing and initialization // Creating a submatrix view on the dense matrix D1 SubmatrixType sm1 = submatrix( D1, 4UL, 4UL, 8UL, 16UL ); // Creating a submatrix view on the dense submatrix sm1 SubmatrixType sm2 = submatrix( sm1, 1UL, 1UL, 4UL, 8UL ); \endcode // \n \section views_submatrices_on_symmetric_matrices Submatrices on Symmetric Matrices // // Submatrices can also be created on symmetric matrices (see the \c SymmetricMatrix class template): \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; using blaze::Submatrix; typedef SymmetricMatrix< DynamicMatrix<int> > SymmetricDynamicType; typedef Submatrix< SymmetricDynamicType > SubmatrixType; // Setup of a 16x16 symmetric matrix SymmetricDynamicType A( 16UL ); // Creating a dense submatrix of size 8x12, starting in row 2 and column 4 SubmatrixType sm = submatrix( A, 2UL, 4UL, 8UL, 12UL ); \endcode // It is important to note, however, that (compound) assignments to such submatrices have a // special restriction: The symmetry of the underlying symmetric matrix must not be broken! // Since the modification of element \f$ a_{ij} \f$ of a symmetric matrix also modifies the // element \f$ a_{ji} \f$, the matrix to be assigned must be structured such that the symmetry // of the symmetric matrix is preserved. Otherwise a \c std::invalid_argument exception is // thrown: \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; // Setup of two default 4x4 symmetric matrices SymmetricMatrix< DynamicMatrix<int> > A1( 4 ), A2( 4 ); // Setup of the 3x2 dynamic matrix // // ( 1 2 ) // B = ( 3 4 ) // ( 5 6 ) // DynamicMatrix<int> B{ { 1, 2 }, { 3, 4 }, { 5, 6 } }; // OK: Assigning B to a submatrix of A1 such that the symmetry can be preserved // // ( 0 0 1 2 ) // A1 = ( 0 0 3 4 ) // ( 1 3 5 6 ) // ( 2 4 6 0 ) // submatrix( A1, 0UL, 2UL, 3UL, 2UL ) = B; // OK // Error: Assigning B to a submatrix of A2 such that the symmetry cannot be preserved! // The elements marked with X cannot be assigned unambiguously! // // ( 0 1 2 0 ) // A2 = ( 1 3 X 0 ) // ( 2 X 6 0 ) // ( 0 0 0 0 ) // submatrix( A2, 0UL, 1UL, 3UL, 2UL ) = B; // Assignment throws an exception! \endcode // \n Previous: \ref views_subvectors &nbsp; &nbsp; Next: \ref views_rows */ //************************************************************************************************* //**Rows******************************************************************************************* /*!\page views_rows Rows // // \tableofcontents // // // Rows provide views on a specific row of a dense or sparse matrix. As such, rows act as a // reference to a specific row. This reference is valid and can be used in every way any other // row vector can be used as long as the matrix containing the row is not resized or entirely // destroyed. The row also acts as an alias to the row elements: Changes made to the elements // (e.g. modifying values, inserting or erasing elements) are immediately visible in the matrix // and changes made via the matrix are immediately visible in the row. // // // \n \section views_rows_class The Row Class Template // <hr> // // The blaze::Row class template represents a reference to a specific row of a dense or sparse // matrix primitive. It can be included via the header file \code #include <blaze/math/Row.h> \endcode // The type of the matrix is specified via template parameter: \code template< typename MT > class Row; \endcode // \c MT specifies the type of the matrix primitive. 
Row can be used with every matrix primitive, // but does not work with any matrix expression type. // // // \n \section views_rows_setup Setup of Rows // <hr> // // A reference to a dense or sparse row can be created very conveniently via the \c row() function. // This reference can be treated as any other row vector, i.e. it can be assigned to, it can be // copied from, and it can be used in arithmetic operations. The reference can also be used on // both sides of an assignment: The row can either be used as an alias to grant write access to a // specific row of a matrix primitive on the left-hand side of an assignment or to grant read-access // to a specific row of a matrix primitive or expression on the right-hand side of an assignment. // The following example demonstrates this for dense and sparse matrices: \code typedef blaze::DynamicVector<double,rowVector> DenseVectorType; typedef blaze::CompressedVector<double,rowVector> SparseVectorType; typedef blaze::DynamicMatrix<double,rowMajor> DenseMatrixType; typedef blaze::CompressedMatrix<double,rowMajor> SparseMatrixType; DenseVectorType x; SparseVectorType y; DenseMatrixType A, B; SparseMatrixType C, D; // ... Resizing and initialization // Setting the 2nd row of matrix A to x blaze::Row<DenseMatrixType> row2 = row( A, 2UL ); row2 = x; // Setting the 3rd row of matrix B to y row( B, 3UL ) = y; // Setting x to the 4th row of the result of the matrix multiplication x = row( A * B, 4UL ); // Setting y to the 2nd row of the result of the sparse matrix multiplication y = row( C * D, 2UL ); \endcode // The \c row() function can be used on any dense or sparse matrix, including expressions, as // illustrated by the source code example. However, rows cannot be instantiated for expression // types, but only for matrix primitives, i.e. for matrix types that offer write // access. // // // \n \section views_rows_common_operations Common Operations // <hr> // // A row view can be used like any other row vector. For instance, the current number of elements // can be obtained via the \c size() function, the current capacity via the \c capacity() function, // and the number of non-zero elements via the \c nonZeros() function. However, since rows are // references to specific rows of a matrix, several operations are not possible on views, such // as resizing and swapping. The following example shows this by means of a dense row view: \code typedef blaze::DynamicMatrix<int,rowMajor> MatrixType; typedef blaze::Row<MatrixType> RowType; MatrixType A( 42UL, 42UL ); // ... Resizing and initialization // Creating a reference to the 2nd row of matrix A RowType row2 = row( A, 2UL ); row2.size(); // Returns the number of elements in the row row2.capacity(); // Returns the capacity of the row row2.nonZeros(); // Returns the number of non-zero elements contained in the row row2.resize( 84UL ); // Compilation error: Cannot resize a single row of a matrix RowType row3 = row( A, 3UL ); swap( row2, row3 ); // Compilation error: Swap operation not allowed \endcode // \n \section views_rows_element_access Element Access // <hr> // // The elements of the row can be directly accessed with the subscript operator. The numbering // of the row elements is \f[\left(\begin{array}{*{5}{c}} 0 & 1 & 2 & \cdots & N-1 \\ \end{array}\right),\f] // where N is the number of columns of the referenced matrix. Alternatively, the elements of // a row can be traversed via iterators.
Just as with vectors, in case of non-const rows, // \c begin() and \c end() return an Iterator, which allows a manipulation of the non-zero // values; in case of a constant row a ConstIterator is returned: \code typedef blaze::DynamicMatrix<int,rowMajor> MatrixType; typedef blaze::Row<MatrixType> RowType; MatrixType A( 128UL, 256UL ); // ... Resizing and initialization // Creating a reference to the 31st row of matrix A RowType row31 = row( A, 31UL ); for( RowType::Iterator it=row31.begin(); it!=row31.end(); ++it ) { *it = ...; // OK: Write access to the dense row value. ... = *it; // OK: Read access to the dense row value. } for( RowType::ConstIterator it=row31.begin(); it!=row31.end(); ++it ) { *it = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid. ... = *it; // OK: Read access to the dense row value. } \endcode \code typedef blaze::CompressedMatrix<int,rowMajor> MatrixType; typedef blaze::Row<MatrixType> RowType; MatrixType A( 128UL, 256UL ); // ... Resizing and initialization // Creating a reference to the 31st row of matrix A RowType row31 = row( A, 31UL ); for( RowType::Iterator it=row31.begin(); it!=row31.end(); ++it ) { it->value() = ...; // OK: Write access to the value of the non-zero element. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element. } for( RowType::ConstIterator it=row31.begin(); it!=row31.end(); ++it ) { it->value() = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element. } \endcode // \n \section views_rows_element_insertion Element Insertion // <hr> // // Inserting/accessing elements in a sparse row can be done by several alternative functions. // The following example demonstrates all options: \code typedef blaze::CompressedMatrix<double,blaze::rowMajor> MatrixType; MatrixType A( 10UL, 100UL ); // Non-initialized 10x100 matrix typedef blaze::Row<MatrixType> RowType; RowType row0( row( A, 0UL ) ); // Reference to the 0th row of A // The subscript operator provides access to all possible elements of the sparse row, // including the zero elements. In case the subscript operator is used to access an element // that is currently not stored in the sparse row, the element is inserted into the row. row0[42] = 2.0; // The second operation for inserting elements is the set() function. In case the element // is not contained in the row it is inserted into the row; if it is already contained in // the row its value is modified. row0.set( 45UL, -1.2 ); // An alternative for inserting elements into the row is the insert() function. However, // it inserts the element only in case the element is not already contained in the row. row0.insert( 50UL, 3.7 ); // A very efficient way to add new elements to a sparse row is the append() function. // Note that append() requires that the appended element's index is strictly larger than // the currently largest non-zero index of the row and that the row's capacity is large // enough to hold the new element.
row0.reserve( 10UL ); row0.append( 51UL, -2.1 ); \endcode // \n \section views_rows_arithmetic_operations Arithmetic Operations // <hr> // // Both dense and sparse rows can be used in all arithmetic operations that any other dense or // sparse row vector can be used in. The following example gives an impression of the use of // dense rows within arithmetic operations. All operations (addition, subtraction, multiplication, // scaling, ...) can be performed on all possible combinations of dense and sparse rows with // fitting element types: \code blaze::DynamicVector<double,blaze::rowVector> a( 2UL, 2.0 ), b; blaze::CompressedVector<double,blaze::rowVector> c( 2UL ); c[1] = 3.0; typedef blaze::DynamicMatrix<double,blaze::rowMajor> DenseMatrix; DenseMatrix A( 4UL, 2UL ); // Non-initialized 4x2 matrix typedef blaze::Row<DenseMatrix> RowType; RowType row0( row( A, 0UL ) ); // Reference to the 0th row of A row0[0] = 0.0; // Manual initialization of the 0th row of A row0[1] = 0.0; row( A, 1UL ) = 1.0; // Homogeneous initialization of the 1st row of A row( A, 2UL ) = a; // Dense vector initialization of the 2nd row of A row( A, 3UL ) = c; // Sparse vector initialization of the 3rd row of A b = row0 + a; // Dense vector/dense vector addition b = c + row( A, 1UL ); // Sparse vector/dense vector addition b = row0 * row( A, 2UL ); // Component-wise vector multiplication row( A, 1UL ) *= 2.0; // In-place scaling of the 1st row b = row( A, 1UL ) * 2.0; // Scaling of the 1st row b = 2.0 * row( A, 1UL ); // Scaling of the 1st row row( A, 2UL ) += a; // Addition assignment row( A, 2UL ) -= c; // Subtraction assignment row( A, 2UL ) *= row( A, 0UL ); // Multiplication assignment double scalar = row( A, 1UL ) * trans( c ); // Scalar/dot/inner product between two vectors A = trans( c ) * row( A, 1UL ); // Outer product between two vectors \endcode // \n \section views_rows_non_fitting_storage_order Views on Matrices with Non-Fitting Storage Order // <hr> // // Especially noteworthy is that row views can be created for both row-major and column-major // matrices. Whereas the interface of a row-major matrix only allows the direct traversal of a row // and the interface of a column-major matrix only allows the direct traversal of a column, via views it is // possible to traverse a row of a column-major matrix or a column of a row-major matrix. For // instance: \code typedef blaze::CompressedMatrix<int,columnMajor> MatrixType; typedef blaze::Row<MatrixType> RowType; MatrixType A( 64UL, 32UL ); // ... Resizing and initialization // Creating a reference to the 1st row of a column-major matrix A RowType row1 = row( A, 1UL ); for( RowType::Iterator it=row1.begin(); it!=row1.end(); ++it ) { // ... } \endcode // However, please note that creating a row view on a matrix stored in a column-major fashion // can result in a considerable performance decrease in comparison to a view on a matrix with // a fitting storage orientation. This is due to the non-contiguous storage of the matrix // elements. Therefore care has to be taken in the choice of the most suitable storage order: \code // Setup of two column-major matrices CompressedMatrix<double,columnMajor> A( 128UL, 128UL ); CompressedMatrix<double,columnMajor> B( 128UL, 128UL ); // ... Resizing and initialization // The computation of the 15th row of the multiplication between A and B ... CompressedVector<double,rowVector> x = row( A * B, 15UL ); // ... is essentially the same as the following computation, which multiplies // the 15th row of the column-major matrix A with B. CompressedVector<double,rowVector> x = row( A, 15UL ) * B; \endcode // Although \b Blaze performs the resulting vector/matrix multiplication as efficiently as possible, // using a row-major storage order for matrix A would result in a more efficient evaluation.
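// For illustration, a sketch of the more efficient variant with a fitting (row-major) storage
// order for matrix A (the matrix sizes are taken from the example above):

\code
// Setup with a fitting storage order: A is stored row-major
CompressedMatrix<double,rowMajor>    A( 128UL, 128UL );
CompressedMatrix<double,columnMajor> B( 128UL, 128UL );
// ... Resizing and initialization

// The 15th row of the row-major matrix A can now be traversed directly, so the
// vector/matrix multiplication avoids the non-contiguous element access
CompressedVector<double,rowVector> x = row( A, 15UL ) * B;
\endcode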
// // \n Previous: \ref views_submatrices &nbsp; &nbsp; Next: \ref views_columns */ //************************************************************************************************* //**Columns**************************************************************************************** /*!\page views_columns Columns // // \tableofcontents // // // Just as rows provide a view on a specific row of a matrix, columns provide views on a specific // column of a dense or sparse matrix. As such, columns act as a reference to a specific column. // This reference is valid and can be used in every way any other column vector can be used as long // as the matrix containing the column is not resized or entirely destroyed. Changes made to the // elements (e.g. modifying values, inserting or erasing elements) are immediately visible in the // matrix and changes made via the matrix are immediately visible in the column. // // // \n \section views_columns_class The Column Class Template // <hr> // // The blaze::Column class template represents a reference to a specific column of a dense or // sparse matrix primitive. It can be included via the header file \code #include <blaze/math/Column.h> \endcode // The type of the matrix is specified via template parameter: \code template< typename MT > class Column; \endcode // \c MT specifies the type of the matrix primitive. Column can be used with every matrix // primitive, but does not work with any matrix expression type. // // // \n \section views_colums_setup Setup of Columns // <hr> // // Similar to the setup of a row, a reference to a dense or sparse column can be created very // conveniently via the \c column() function. This reference can be treated as any other column // vector, i.e. it can be assigned to, copied from, and used in arithmetic operations. The // column can either be used as an alias to grant write access to a specific column of a matrix // primitive on the left-hand side of an assignment or to grant read-access to a specific column // of a matrix primitive or expression on the right-hand side of an assignment. The following // example demonstrates this for dense and sparse matrices: \code typedef blaze::DynamicVector<double,columnVector> DenseVectorType; typedef blaze::CompressedVector<double,columnVector> SparseVectorType; typedef blaze::DynamicMatrix<double,columnMajor> DenseMatrixType; typedef blaze::CompressedMatrix<double,columnMajor> SparseMatrixType; DenseVectorType x; SparseVectorType y; DenseMatrixType A, B; SparseMatrixType C, D; // ... Resizing and initialization // Setting the 1st column of matrix A to x blaze::Column<DenseMatrixType> col1 = column( A, 1UL ); col1 = x; // Setting the 4th column of matrix B to y column( B, 4UL ) = y; // Setting x to the 2nd column of the result of the matrix multiplication x = column( A * B, 2UL ); // Setting y to the 2nd column of the result of the sparse matrix multiplication y = column( C * D, 2UL ); \endcode // The \c column() function can be used on any dense or sparse matrix, including expressions, as // illustrated by the source code example. However, columns cannot be instantiated for expression // types, but only for matrix primitives, i.e. for matrix types that offer write // access.
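// As a minimal sketch of this restriction (the matrix and vector types below are chosen purely
// for illustration):

\code
typedef blaze::DynamicMatrix<double,blaze::columnMajor> DenseMatrixType;

DenseMatrixType A, B;
// ... Resizing and initialization

// OK: The column view is bound to the matrix primitive A and grants write access
blaze::Column<DenseMatrixType> col0 = column( A, 0UL );

// OK: The expression A * B is evaluated and its 0th column is assigned to the vector y
blaze::DynamicVector<double,blaze::columnVector> y = column( A * B, 0UL );

// Not possible: blaze::Column cannot be instantiated with the type of the expression
// A * B, since expression types do not offer write access
\endcode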
// // // \n \section views_columns_common_operations Common Operations // <hr> // // A column view can be used like any other column vector. For instance, the current number of // elements can be obtained via the \c size() function, the current capacity via the \c capacity() // function, and the number of non-zero elements via the \c nonZeros() function. However, since // columns are references to specific columns of a matrix, several operations are not possible on // views, such as resizing and swapping. The following example shows this by means of a dense // column view: \code typedef blaze::DynamicMatrix<int,columnMajor> MatrixType; typedef blaze::Column<MatrixType> ColumnType; MatrixType A( 42UL, 42UL ); // ... Resizing and initialization // Creating a reference to the 2nd column of matrix A ColumnType col2 = column( A, 2UL ); col2.size(); // Returns the number of elements in the column col2.capacity(); // Returns the capacity of the column col2.nonZeros(); // Returns the number of non-zero elements contained in the column col2.resize( 84UL ); // Compilation error: Cannot resize a single column of a matrix ColumnType col3 = column( A, 3UL ); swap( col2, col3 ); // Compilation error: Swap operation not allowed \endcode // \n \section views_columns_element_access Element Access // <hr> // // The elements of the column can be directly accessed with the subscript operator. The numbering // of the column elements is \f[\left(\begin{array}{*{5}{c}} 0 & 1 & 2 & \cdots & N-1 \\ \end{array}\right),\f] // where N is the number of rows of the referenced matrix. Alternatively, the elements of // a column can be traversed via iterators. Just as with vectors, in case of non-const columns, // \c begin() and \c end() return an Iterator, which allows a manipulation of the non-zero // values; in case of a constant column a ConstIterator is returned: \code typedef blaze::DynamicMatrix<int,columnMajor> MatrixType; typedef blaze::Column<MatrixType> ColumnType; MatrixType A( 128UL, 256UL ); // ... Resizing and initialization // Creating a reference to the 31st column of matrix A ColumnType col31 = column( A, 31UL ); for( ColumnType::Iterator it=col31.begin(); it!=col31.end(); ++it ) { *it = ...; // OK: Write access to the dense column value. ... = *it; // OK: Read access to the dense column value. } for( ColumnType::ConstIterator it=col31.begin(); it!=col31.end(); ++it ) { *it = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid. ... = *it; // OK: Read access to the dense column value. } \endcode \code typedef blaze::CompressedMatrix<int,columnMajor> MatrixType; typedef blaze::Column<MatrixType> ColumnType; MatrixType A( 128UL, 256UL ); // ... Resizing and initialization // Creating a reference to the 31st column of matrix A ColumnType col31 = column( A, 31UL ); for( ColumnType::Iterator it=col31.begin(); it!=col31.end(); ++it ) { it->value() = ...; // OK: Write access to the value of the non-zero element. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element. } for( ColumnType::ConstIterator it=col31.begin(); it!=col31.end(); ++it ) { it->value() = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ...
= it->index(); // OK: Read access to the index of the sparse element. } \endcode // \n \section views_columns_element_insertion Element Insertion // <hr> // // Inserting/accessing elements in a sparse column can be done by several alternative functions. // The following example demonstrates all options: \code typedef blaze::CompressedMatrix<double,blaze::columnMajor> MatrixType; MatrixType A( 100UL, 10UL ); // Non-initialized 100x10 matrix typedef blaze::Column<MatrixType> ColumnType; ColumnType col0( column( A, 0UL ) ); // Reference to the 0th column of A // The subscript operator provides access to all possible elements of the sparse column, // including the zero elements. In case the subscript operator is used to access an element // that is currently not stored in the sparse column, the element is inserted into the column. col0[42] = 2.0; // The second operation for inserting elements is the set() function. In case the element // is not contained in the column it is inserted into the column; if it is already contained // in the column its value is modified. col0.set( 45UL, -1.2 ); // An alternative for inserting elements into the column is the insert() function. However, // it inserts the element only in case the element is not already contained in the column. col0.insert( 50UL, 3.7 ); // A very efficient way to add new elements to a sparse column is the append() function. // Note that append() requires that the appended element's index is strictly larger than // the currently largest non-zero index of the column and that the column's capacity is // large enough to hold the new element. col0.reserve( 10UL ); col0.append( 51UL, -2.1 ); \endcode // \n \section views_columns_arithmetic_operations Arithmetic Operations // <hr> // // Both dense and sparse columns can be used in all arithmetic operations that any other dense or // sparse column vector can be used in. The following example gives an impression of the use of // dense columns within arithmetic operations. All operations (addition, subtraction, multiplication, // scaling, ...)
can be performed on all possible combinations of dense and sparse columns with // fitting element types: \code blaze::DynamicVector<double,blaze::columnVector> a( 2UL, 2.0 ), b; blaze::CompressedVector<double,blaze::columnVector> c( 2UL ); c[1] = 3.0; typedef blaze::DynamicMatrix<double,blaze::columnMajor> MatrixType; MatrixType A( 2UL, 4UL ); // Non-initialized 2x4 matrix typedef blaze::Column<MatrixType> ColumnType; ColumnType col0( column( A, 0UL ) ); // Reference to the 0th column of A col0[0] = 0.0; // Manual initialization of the 0th column of A col0[1] = 0.0; column( A, 1UL ) = 1.0; // Homogeneous initialization of the 1st column of A column( A, 2UL ) = a; // Dense vector initialization of the 2nd column of A column( A, 3UL ) = c; // Sparse vector initialization of the 3rd column of A b = col0 + a; // Dense vector/dense vector addition b = c + column( A, 1UL ); // Sparse vector/dense vector addition b = col0 * column( A, 2UL ); // Component-wise vector multiplication column( A, 1UL ) *= 2.0; // In-place scaling of the 1st column b = column( A, 1UL ) * 2.0; // Scaling of the 1st column b = 2.0 * column( A, 1UL ); // Scaling of the 1st column column( A, 2UL ) += a; // Addition assignment column( A, 2UL ) -= c; // Subtraction assignment column( A, 2UL ) *= column( A, 0UL ); // Multiplication assignment double scalar = trans( c ) * column( A, 1UL ); // Scalar/dot/inner product between two vectors A = column( A, 1UL ) * trans( c ); // Outer product between two vectors \endcode // \n \section views_columns_non_fitting_storage_order Views on Matrices with Non-Fitting Storage Order // <hr> // // Especially noteworthy is that column views can be created for both row-major and column-major // matrices. Whereas the interface of a row-major matrix only allows the direct traversal of a row // and the interface of a column-major matrix only allows the direct traversal of a column, via views it is // possible to traverse a row of a column-major matrix or a column of a row-major matrix. For // instance: \code typedef blaze::CompressedMatrix<int,rowMajor> MatrixType; typedef blaze::Column<MatrixType> ColumnType; MatrixType A( 64UL, 32UL ); // ... Resizing and initialization // Creating a reference to the 1st column of a row-major matrix A ColumnType col1 = column( A, 1UL ); for( ColumnType::Iterator it=col1.begin(); it!=col1.end(); ++it ) { // ... } \endcode // However, please note that creating a column view on a matrix stored in a row-major fashion // can result in a considerable performance decrease in comparison to a view on a matrix with // a fitting storage orientation. This is due to the non-contiguous storage of the matrix // elements. Therefore care has to be taken in the choice of the most suitable storage order: \code // Setup of two row-major matrices CompressedMatrix<double,rowMajor> A( 128UL, 128UL ); CompressedMatrix<double,rowMajor> B( 128UL, 128UL ); // ... Resizing and initialization // The computation of the 15th column of the multiplication between A and B ... CompressedVector<double,columnVector> x = column( A * B, 15UL ); // ... is essentially the same as the following computation, which multiplies // the 15th column of the row-major matrix B with A. CompressedVector<double,columnVector> x = A * column( B, 15UL ); \endcode // Although \b Blaze performs the resulting matrix/vector multiplication as efficiently as possible, // using a column-major storage order for matrix B would result in a more efficient evaluation.
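// For illustration, a sketch of the more efficient variant with a fitting (column-major)
// storage order for matrix B (the matrix sizes are taken from the example above):

\code
// Setup with a fitting storage order: B is stored column-major
CompressedMatrix<double,rowMajor>    A( 128UL, 128UL );
CompressedMatrix<double,columnMajor> B( 128UL, 128UL );
// ... Resizing and initialization

// The 15th column of the column-major matrix B can now be traversed directly, so the
// matrix/vector multiplication avoids the non-contiguous element access
CompressedVector<double,columnVector> x = A * column( B, 15UL );
\endcode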
// // \n Previous: \ref views_rows &nbsp; &nbsp; Next: \ref arithmetic_operations */ //************************************************************************************************* //**Arithmetic Operations************************************************************************** /*!\page arithmetic_operations Arithmetic Operations // // \tableofcontents // // // \b Blaze provides the following arithmetic operations for vectors and matrices: // // <ul> // <li> \ref addition </li> // <li> \ref subtraction </li> // <li> \ref scalar_multiplication </li> // <li> \ref vector_vector_multiplication // <ul> // <li> \ref componentwise_multiplication </li> // <li> \ref inner_product </li> // <li> \ref outer_product </li> // <li> \ref cross_product </li> // </ul> // </li> // <li> \ref vector_vector_division </li> // <li> \ref matrix_vector_multiplication </li> // <li> \ref matrix_matrix_multiplication </li> // </ul> // // \n Previous: \ref views_columns &nbsp; &nbsp; Next: \ref addition */ //************************************************************************************************* //**Addition*************************************************************************************** /*!\page addition Addition // // The addition of vectors and matrices is as intuitive as the addition of scalar values. For both // the vector addition as well as the matrix addition the addition operator can be used. It even // enables the addition of dense and sparse vectors as well as the addition of dense and sparse // matrices: \code blaze::DynamicVector<int> v1( 5UL ), v3; blaze::CompressedVector<float> v2( 5UL ); // ... Initializing the vectors v3 = v1 + v2; // Addition of two column vectors of different data type \endcode \code blaze::DynamicMatrix<float,rowMajor> M1( 7UL, 3UL ); blaze::CompressedMatrix<size_t,columnMajor> M2( 7UL, 3UL ), M3; // ... Initializing the matrices M3 = M1 + M2; // Addition of a row-major and a column-major matrix of different data type \endcode // Note that it is necessary that both operands have exactly the same dimensions. Violating this // precondition results in an exception. Also note that in case of vectors it is only possible to // add vectors with the same transpose flag: \code blaze::DynamicVector<int,columnVector> v1( 5UL ); blaze::CompressedVector<float,rowVector> v2( 5UL ); v1 + v2; // Compilation error: Cannot add a column vector and a row vector v1 + trans( v2 ); // OK: Addition of two column vectors \endcode // In case of matrices, however, it is possible to add row-major and column-major matrices. Note, // however, that for performance reasons the addition of two matrices with the same storage order // is preferable. The same argument holds for the element type: In case two vectors or matrices // with the same element type are added, the performance can be much higher due to vectorization // of the operation. \code blaze::DynamicVector<double> v1( 100UL ), v2( 100UL ), v3; // ... Initialization of the vectors v3 = v1 + v2; // Vectorized addition of two double precision vectors \endcode \code blaze::DynamicMatrix<float> M1( 50UL, 70UL ), M2( 50UL, 70UL ), M3; // ...
   M3 = M1 + M2;  // Vectorized addition of two row-major, single precision dense matrices
   \endcode

// \n Previous: \ref arithmetic_operations &nbsp; &nbsp; Next: \ref subtraction
*/
//*************************************************************************************************


//**Subtraction************************************************************************************
/*!\page subtraction Subtraction
//
// The subtraction of vectors and matrices works exactly as intuitively as the addition, but with
// the subtraction operator. For both the vector subtraction as well as the matrix subtraction
// the subtraction operator can be used. It also enables the subtraction of dense and sparse
// vectors as well as the subtraction of dense and sparse matrices:

   \code
   blaze::DynamicVector<int>      v1( 5UL ), v3;
   blaze::CompressedVector<float> v2( 5UL );

   // ... Initializing the vectors

   v3 = v1 - v2;  // Subtraction of two column vectors of different data type

   blaze::DynamicMatrix<float,rowMajor>        M1( 7UL, 3UL );
   blaze::CompressedMatrix<size_t,columnMajor> M2( 7UL, 3UL ), M3;

   // ... Initializing the matrices

   M3 = M1 - M2;  // Subtraction of a row-major and a column-major matrix of different data type
   \endcode

// Note that it is necessary that both operands have exactly the same dimensions. Violating this
// precondition results in an exception. Also note that in case of vectors it is only possible to
// subtract vectors with the same transpose flag:

   \code
   blaze::DynamicVector<int,columnVector>   v1( 5UL );
   blaze::CompressedVector<float,rowVector> v2( 5UL );

   v1 - v2;           // Compilation error: Cannot subtract a row vector from a column vector
   v1 - trans( v2 );  // OK: Subtraction of two column vectors
   \endcode

// In case of matrices, however, it is possible to subtract row-major and column-major matrices.
// Note however that in favor of performance the subtraction of two matrices with the same storage
// order is favorable. The same argument holds for the element type: In case two vectors or matrices
// with the same element type are subtracted, the performance can be much higher due to vectorization
// of the operation.

   \code
   blaze::DynamicVector<double> v1( 100UL ), v2( 100UL ), v3;

   // ... Initialization of the vectors

   v3 = v1 - v2;  // Vectorized subtraction of two double precision vectors

   blaze::DynamicMatrix<float> M1( 50UL, 70UL ), M2( 50UL, 70UL ), M3;

   // ... Initialization of the matrices

   M3 = M1 - M2;  // Vectorized subtraction of two row-major, single precision dense matrices
   \endcode

// \n Previous: \ref addition &nbsp; &nbsp; Next: \ref scalar_multiplication
*/
//*************************************************************************************************


//**Scalar Multiplication**************************************************************************
/*!\page scalar_multiplication Scalar Multiplication
//
// The scalar multiplication is the multiplication of a scalar value with a vector or a matrix.
// In \b Blaze it is possible to use all built-in/fundamental data types except bool as scalar
// values. Additionally, it is possible to use std::complex values with the same built-in data
// types as element type.
   \code
   blaze::StaticVector<int,3UL> v1{ 1, 2, 3 };

   blaze::DynamicVector<double>   v2 = v1 * 1.2;
   blaze::CompressedVector<float> v3 = -0.3F * v1;
   \endcode

   \code
   blaze::StaticMatrix<int,3UL,2UL> M1{ { 1, 2 }, { 3, 4 }, { 5, 6 } };

   blaze::DynamicMatrix<double>   M2 = M1 * 1.2;
   blaze::CompressedMatrix<float> M3 = -0.3F * M1;
   \endcode

// Vectors and matrices cannot be used as scalar values for scalar multiplications (see the
// following example). However, each vector and matrix provides the \c scale() function, which
// can be used to scale a vector or matrix element-wise with arbitrary scalar data types:

   \code
   blaze::CompressedMatrix< blaze::StaticMatrix<int,3UL,3UL> > M1;
   blaze::StaticMatrix<int,3UL,3UL> scalar;

   M1 * scalar;         // No scalar multiplication, but matrix/matrix multiplication

   M1.scale( scalar );  // Scalar multiplication
   \endcode

// \n Previous: \ref subtraction &nbsp; &nbsp; Next: \ref componentwise_multiplication
*/
//*************************************************************************************************


//**Vector/Vector Multiplication*******************************************************************
/*!\page vector_vector_multiplication Vector/Vector Multiplication
//
// \n \section componentwise_multiplication Componentwise Multiplication
// <hr>
//
// Multiplying two vectors with the same transpose flag (i.e. either blaze::columnVector or
// blaze::rowVector) via the multiplication operator results in a componentwise multiplication
// of the two vectors:

   \code
   using blaze::DynamicVector;
   using blaze::CompressedVector;
   using blaze::StaticVector;

   CompressedVector<int,columnVector> v1( 17UL );
   DynamicVector<int,columnVector>    v2( 17UL );

   StaticVector<double,10UL,rowVector> v3;
   DynamicVector<double,rowVector>     v4( 10UL );

   // ... Initialization of the vectors

   CompressedVector<int,columnVector> v5( v1 * v2 );  // Componentwise multiplication of a sparse and
                                                      // a dense column vector. The result is a sparse
                                                      // column vector.
   DynamicVector<double,rowVector>    v6( v3 * v4 );  // Componentwise multiplication of two dense row
                                                      // vectors. The result is a dense row vector.
   \endcode

// \n \section inner_product Inner Product / Scalar Product / Dot Product
// <hr>
//
// The multiplication between a row vector and a column vector results in an inner product between
// the two vectors:

   \code
   blaze::StaticVector<int,3UL,rowVector> v1{ 2, 5, -1 };
   blaze::DynamicVector<int,columnVector> v2{ -1, 3, -2 };

   int result = v1 * v2;  // Results in the value 15
   \endcode

// The \c trans() function can be used to transpose a vector as necessary:

   \code
   blaze::StaticVector<int,3UL,rowVector> v1{ 2, 5, -1 };
   blaze::StaticVector<int,3UL,rowVector> v2{ -1, 3, -2 };

   int result = v1 * trans( v2 );  // Also results in the value 15
   \endcode

// Alternatively, either the \c inner() function, the \c dot() function or the comma operator can
// be used for any combination of vectors (row or column vectors) to perform an inner product:

   \code
   blaze::StaticVector<int,3UL,columnVector> v1{ 2, 5, -1 };
   blaze::StaticVector<int,3UL,rowVector>    v2{ -1, 3, -2 };

   // All alternatives for the inner product between a column vector and a row vector
   int result1 = trans( v1 ) * trans( v2 );
   int result2 = inner( v1, v2 );
   int result3 = dot( v1, v2 );
   int result4 = (v1,v2);
   \endcode

// When using the comma operator, please note the brackets embracing the inner product expression.
// Due to the low precedence of the comma operator (lower even than the assignment operator) these
// brackets are strictly required for a correct evaluation of the inner product.
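// For instance (an illustrative sketch of this precedence pitfall, not part of the \b Blaze API),
// omitting the brackets changes the meaning of the statement entirely:

   \code
   int result;
   result = (v1,v2);  // OK: evaluates the inner product of v1 and v2
   result = v1, v2;   // Compilation error: parsed as '(result = v1), v2'
   \endcode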
//
//
// \n \section outer_product Outer Product
// <hr>
//
// The multiplication between a column vector and a row vector results in the outer product of
// the two vectors:

   \code
   blaze::StaticVector<int,3UL,columnVector> v1{ 2, 5, -1 };
   blaze::DynamicVector<int,rowVector>       v2{ -1, 3, -2 };

   blaze::StaticMatrix<int,3UL,3UL> M1 = v1 * v2;
   \endcode

// The \c trans() function can be used to transpose a vector as necessary:

   \code
   blaze::StaticVector<int,3UL,rowVector> v1{ 2, 5, -1 };
   blaze::StaticVector<int,3UL,rowVector> v2{ -1, 3, -2 };

   blaze::StaticMatrix<int,3UL,3UL> M1 = trans( v1 ) * v2;
   \endcode

// Alternatively, the \c outer() function can be used for any combination of vectors (row or column
// vectors) to perform an outer product:

   \code
   blaze::StaticVector<int,3UL,rowVector> v1{ 2, 5, -1 };
   blaze::StaticVector<int,3UL,rowVector> v2{ -1, 3, -2 };

   blaze::StaticMatrix<int,3UL,3UL> M1 = outer( v1, v2 );  // Outer product between two row vectors
   \endcode

// \n \section cross_product Cross Product
// <hr>
//
// Two vectors with the same transpose flag can be multiplied via the cross product. The cross
// product between two vectors \f$ a \f$ and \f$ b \f$ is defined as

   \f[
   \left(\begin{array}{*{1}{c}}
   c_0 \\
   c_1 \\
   c_2 \\
   \end{array}\right)
   =
   \left(\begin{array}{*{1}{c}}
   a_1 b_2 - a_2 b_1 \\
   a_2 b_0 - a_0 b_2 \\
   a_0 b_1 - a_1 b_0 \\
   \end{array}\right).
   \f]

// Due to the absence of a \f$ \times \f$ operator in the C++ language, the cross product is
// realized via the \c cross() function. Alternatively, the modulo operator (i.e. \c operator%)
// can be used in case infix notation is required:

   \code
   blaze::StaticVector<int,3UL,columnVector> v1{ 2, 5, -1 };
   blaze::DynamicVector<int,columnVector>    v2{ -1, 3, -2 };

   blaze::StaticVector<int,3UL,columnVector> v3( cross( v1, v2 ) );
   blaze::StaticVector<int,3UL,columnVector> v4( v1 % v2 );
   \endcode

// Please note that the cross product is restricted to three dimensional (dense and sparse)
// column vectors.
//
// \n Previous: \ref scalar_multiplication &nbsp; &nbsp; Next: \ref vector_vector_division
*/
//*************************************************************************************************


//**Vector/Vector Division*************************************************************************
/*!\page vector_vector_division Vector/Vector Division
//
// \n \section componentwise_division Componentwise Division
// <hr>
//
// Dividing a vector by a dense vector with the same transpose flag (i.e. either blaze::columnVector
// or blaze::rowVector) via the division operator results in a componentwise division:

   \code
   using blaze::DynamicVector;
   using blaze::CompressedVector;
   using blaze::StaticVector;

   CompressedVector<int,columnVector> v1( 17UL );
   DynamicVector<int,columnVector>    v2( 17UL );

   StaticVector<double,10UL,rowVector> v3;
   DynamicVector<double,rowVector>     v4( 10UL );

   // ... Initialization of the vectors

   CompressedVector<int,columnVector> v5( v1 / v2 );  // Componentwise division of a sparse and a
                                                      // dense column vector. The result is a sparse
                                                      // column vector.
   DynamicVector<double,rowVector>    v6( v3 / v4 );  // Componentwise division of two dense row
                                                      // vectors. The result is a dense row vector.
   \endcode

// Note that all values of the divisor must be non-zero and that no checks are performed to assert
// this precondition!
//
// \n Previous: \ref vector_vector_multiplication &nbsp; &nbsp; Next: \ref matrix_vector_multiplication
*/
//*************************************************************************************************


//**Matrix/Vector Multiplication*******************************************************************
/*!\page matrix_vector_multiplication Matrix/Vector Multiplication
//
// In \b Blaze matrix/vector multiplications can be formulated as intuitively as in mathematical
// textbooks. Just as in textbooks there are two different multiplications between a matrix and
// a vector: a matrix/column vector multiplication and a row vector/matrix multiplication:

   \code
   using blaze::StaticVector;
   using blaze::DynamicVector;
   using blaze::DynamicMatrix;

   DynamicMatrix<int>                  M1( 39UL, 12UL );
   StaticVector<int,12UL,columnVector> v1;

   // ... Initialization of the matrix and the vector

   DynamicVector<int,columnVector> v2 = M1 * v1;           // Matrix/column vector multiplication
   DynamicVector<int,rowVector>    v3 = trans( v1 ) * M1;  // Row vector/matrix multiplication
   \endcode

// Note that the storage order of the matrix poses no restrictions on the operation. Also note
// that the highest performance for a multiplication between a dense matrix and a dense vector can
// be achieved if both the matrix and the vector have the same scalar element type.
//
// \n Previous: \ref vector_vector_division &nbsp; &nbsp; Next: \ref matrix_matrix_multiplication
*/
//*************************************************************************************************


//**Matrix/Matrix Multiplication*******************************************************************
/*!\page matrix_matrix_multiplication Matrix/Matrix Multiplication
//
// The matrix/matrix multiplication can be formulated exactly as in mathematical textbooks:

   \code
   using blaze::DynamicMatrix;
   using blaze::CompressedMatrix;

   DynamicMatrix<double>   M1( 45UL, 85UL );
   CompressedMatrix<float> M2( 85UL, 37UL );

   // ... Initialization of the matrices

   DynamicMatrix<double> M3 = M1 * M2;
   \endcode

// The storage order of the two matrices poses no restrictions on the operation, all variations
// are possible. Note however that the highest performance for a multiplication between two dense
// matrices can be expected for two matrices with the same scalar element type.
//
// \n Previous: \ref matrix_vector_multiplication &nbsp; &nbsp; Next: \ref custom_operations
*/
//*************************************************************************************************


//**Custom Operations******************************************************************************
/*!\page custom_operations Custom Operations
//
// In addition to the provided operations on vectors and matrices it is possible to define custom
// operations. For this purpose, \b Blaze provides the \c forEach() function, which allows one to
// pass the required operation via functor or lambda:

   \code
   blaze::DynamicMatrix<double> A, B;

   B = forEach( A, []( double d ){ return std::sqrt( d ); } );
   \endcode

// This example demonstrates the most convenient way of defining a custom operation by passing a
// lambda to the \c forEach() function. The lambda is executed on each single element of a dense
// vector or matrix or each non-zero element of a sparse vector or matrix.
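// The same mechanism works for vectors. For instance (a small sketch based on the \c forEach()
// semantics just described), the lambda is applied to every element of a dense vector:

   \code
   blaze::DynamicVector<double> v{ 1.0, 4.0, 9.0 };

   v = forEach( v, []( double d ){ return std::sqrt( d ); } );  // v becomes ( 1, 2, 3 )
   \endcode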
// // Alternatively, it is possible to pass a custom functor: \code struct Sqrt { double operator()( double a ) const { return std::sqrt( a ); } }; B = forEach( A, Sqrt() ); \endcode // In order for the functor to work in a call to \c forEach() it must define a function call // operator, which accepts arguments of the type of the according vector or matrix elements. // // Although the operation is automatically parallelized depending on the size of the vector or // matrix, no automatic vectorization is possible. In order to enable vectorization, a \c load() // function can be added to the functor, which handles the vectorized computation. Depending on // the data type this function is passed one of the following \b Blaze SIMD data types: // // <ul> // <li>SIMD data types for fundamental data types // <ul> // <li>\c blaze::SIMDint8: Packed SIMD type for 8-bit signed integral data types</li> // <li>\c blaze::SIMDuint8: Packed SIMD type for 8-bit unsigned integral data types</li> // <li>\c blaze::SIMDint16: Packed SIMD type for 16-bit signed integral data types</li> // <li>\c blaze::SIMDuint16: Packed SIMD type for 16-bit unsigned integral data types</li> // <li>\c blaze::SIMDint32: Packed SIMD type for 32-bit signed integral data types</li> // <li>\c blaze::SIMDuint32: Packed SIMD type for 32-bit unsigned integral data types</li> // <li>\c blaze::SIMDint64: Packed SIMD type for 64-bit signed integral data types</li> // <li>\c blaze::SIMDuint64: Packed SIMD type for 64-bit unsigned integral data types</li> // <li>\c blaze::SIMDfloat: Packed SIMD type for single precision floating point data</li> // <li>\c blaze::SIMDdouble: Packed SIMD type for double precision floating point data</li> // </ul> // </li> // <li>SIMD data types for complex data types // <ul> // <li>\c blaze::cint8: Packed SIMD type for complex 8-bit signed integral data types</li> // <li>\c blaze::cuint8: Packed SIMD type for complex 8-bit unsigned integral data types</li> // <li>\c blaze::cint16: Packed SIMD type for complex 16-bit signed integral data types</li> // <li>\c blaze::cuint16: Packed SIMD type for complex 16-bit unsigned integral data types</li> // <li>\c blaze::cint32: Packed SIMD type for complex 32-bit signed integral data types</li> // <li>\c blaze::cuint32: Packed SIMD type for complex 32-bit unsigned integral data types</li> // <li>\c blaze::cint64: Packed SIMD type for complex 64-bit signed integral data types</li> // <li>\c blaze::cuint64: Packed SIMD type for complex 64-bit unsigned integral data types</li> // <li>\c blaze::cfloat: Packed SIMD type for complex single precision floating point data</li> // <li>\c blaze::cdouble: Packed SIMD type for complex double precision floating point data</li> // </ul> // </li> // </ul> // // All SIMD types provide the \c value data member for a direct access to the underlying intrinsic // data element. In the following example, this intrinsic element is passed to the AVX function // \c _mm256_sqrt_pd(): \code struct Sqrt { double operator()( double a ) const { return std::sqrt( a ); } simd_double_t load( simd_double_t a ) const { return _mm256_sqrt_pd( a.value ); } }; \endcode // In this example, whenever vectorization is generally applicable, the \c load() function is // called instead of the function call operator for as long as the number of remaining elements // is larger-or-equal to the width of the packed SIMD type. In all other cases (which also // includes peel-off and remainder loops) the scalar operation is used. 
//
// Please note that this example has two drawbacks: First, it will only compile in case the
// intrinsic \c _mm256_sqrt_pd() function is available (i.e. when AVX is active). Second, the
// availability of AVX is not taken into account. The first drawback can be alleviated by making
// the \c load() function a function template. The second drawback can be dealt with by adding a
// \c simdEnabled() function template to the functor:

   \code
   struct Sqrt
   {
      double operator()( double a ) const
      {
         return std::sqrt( a );
      }

      template< typename T >
      T load( T a ) const
      {
         return _mm256_sqrt_pd( a.value );
      }

      template< typename T >
      static constexpr bool simdEnabled() {
   #if defined(__AVX__)
         return true;
   #else
         return false;
   #endif
      }
   };
   \endcode

// The \c simdEnabled() function must be a \c static, \c constexpr function and must return whether
// or not vectorization is available for the given data type \c T. In case the function returns
// \c true, the \c load() function is used for a vectorized evaluation, in case the function
// returns \c false, \c load() is not called.
//
// Note that this is a simplified example that only works for dense vectors and matrices with
// double precision floating point elements. The following code shows the complete implementation
// of the according functor that is used within the \b Blaze library. The \b Blaze \c Sqrt functor
// works for all data types that provide a square root operation:

   \code
   namespace blaze {

   struct Sqrt
   {
      template< typename T >
      BLAZE_ALWAYS_INLINE auto operator()( const T& a ) const
      {
         return sqrt( a );
      }

      template< typename T >
      static constexpr bool simdEnabled() { return HasSIMDSqrt<T>::value; }

      template< typename T >
      BLAZE_ALWAYS_INLINE auto load( const T& a ) const
      {
         BLAZE_CONSTRAINT_MUST_BE_SIMD_TYPE( T );
         return sqrt( a );
      }
   };

   } // namespace blaze
   \endcode

// For more information on the available \b Blaze SIMD data types and functions, please see the
// SIMD module in the complete \b Blaze documentation.
//
// \n Previous: \ref matrix_matrix_multiplication &nbsp; &nbsp; Next: \ref shared_memory_parallelization
*/
//*************************************************************************************************


//**Shared Memory Parallelization******************************************************************
/*!\page shared_memory_parallelization Shared Memory Parallelization
//
// One of the main motivations of the \b Blaze 1.x releases was to achieve maximum performance
// on a single CPU core for all possible operations. However, today's CPUs are not single core
// anymore, but provide several (homogeneous or heterogeneous) compute cores. In order to fully
// exploit the performance potential of a multicore CPU, computations have to be parallelized
// across all available cores of a CPU.
// For this purpose, \b Blaze provides three different
// shared memory parallelization techniques:
//
//  - \ref openmp_parallelization
//  - \ref cpp_threads_parallelization
//  - \ref boost_threads_parallelization
//
// In addition, \b Blaze provides means to enforce the serial execution of specific operations:
//
//  - \ref serial_execution
//
// \n Previous: \ref custom_operations &nbsp; &nbsp; Next: \ref openmp_parallelization
*/
//*************************************************************************************************


//**OpenMP Parallelization*************************************************************************
/*!\page openmp_parallelization OpenMP Parallelization
//
// \tableofcontents
//
//
// \n \section openmp_setup OpenMP Setup
// <hr>
//
// To enable the OpenMP-based parallelization, all that needs to be done is to explicitly specify
// the use of OpenMP on the command line:

   \code
   -fopenmp   // GNU C++ compiler
   -openmp    // Intel C++ compiler
   /openmp    // Visual Studio
   \endcode

// This simple action will cause the \b Blaze library to automatically try to run all operations
// in parallel with the specified number of threads.
//
// As is common for OpenMP, the number of threads can be specified either via an environment variable

   \code
   export OMP_NUM_THREADS=4  // Unix systems
   set OMP_NUM_THREADS=4     // Windows systems
   \endcode

// or via an explicit call to the \c omp_set_num_threads() function:

   \code
   omp_set_num_threads( 4 );
   \endcode

// Alternatively, the number of threads can also be specified via the \c setNumThreads() function
// provided by the \b Blaze library:

   \code
   blaze::setNumThreads( 4 );
   \endcode

// Please note that the \b Blaze library does not limit the available number of threads. Therefore
// it is YOUR responsibility to choose an appropriate number of threads. The best performance,
// though, can be expected if the specified number of threads matches the available number of
// cores.
//
// In order to query the number of threads used for the parallelization of operations, the
// \c getNumThreads() function can be used:

   \code
   const size_t threads = blaze::getNumThreads();
   \endcode

// In the context of OpenMP, the function returns the maximum number of threads OpenMP will use
// within a parallel region and is therefore equivalent to the \c omp_get_max_threads() function.
//
//
// \n \section openmp_configuration OpenMP Configuration
// <hr>
//
// Note that \b Blaze is not unconditionally running an operation in parallel. In case \b Blaze
// deems the parallel execution as counterproductive for the overall performance, the operation
// is executed serially. One of the main reasons for not executing an operation in parallel is
// the size of the operands. For instance, a vector addition is only executed in parallel if the
// size of both vector operands exceeds a certain threshold. Otherwise, the performance could
// seriously decrease due to the overhead caused by the thread setup. However, in order to be
// able to adjust the \b Blaze library to a specific system, it is possible to configure these
// thresholds manually. All shared memory thresholds are contained within the configuration file
// <tt>./blaze/config/Thresholds.h</tt>.
//
// Please note that these thresholds are highly sensitive to the used system architecture and
// the shared memory parallelization technique (see also \ref cpp_threads_parallelization and
// \ref boost_threads_parallelization).
// Therefore the default values cannot guarantee maximum
// performance for all possible situations and configurations. They merely provide a reasonable
// standard for the current CPU generation.
//
//
// \n \section openmp_first_touch First Touch Policy
// <hr>
//
// So far the \b Blaze library does not (yet) automatically initialize dynamic memory according
// to the first touch principle. Consider for instance the following vector triad example:

   \code
   using blaze::columnVector;

   const size_t N( 1000000UL );

   blaze::DynamicVector<double,columnVector> a( N ), b( N ), c( N ), d( N );

   // Initialization of the vectors b, c, and d
   for( size_t i=0UL; i<N; ++i ) {
      b[i] = rand<double>();
      c[i] = rand<double>();
      d[i] = rand<double>();
   }

   // Performing a vector triad
   a = b + c * d;
   \endcode

// If this code, which is prototypical for many OpenMP applications that have not been optimized
// for ccNUMA architectures, is run across several locality domains (LD), it will not scale
// beyond the maximum performance achievable on a single LD if the working set does not fit into
// the cache. This is because the initialization loop is executed by a single thread, writing to
// \c b, \c c, and \c d for the first time. Hence, all memory pages belonging to those arrays will
// be mapped into a single LD.
//
// As mentioned above, this problem can be solved by performing vector initialization in parallel:

   \code
   // ...

   // Initialization of the vectors b, c, and d
   #pragma omp parallel for
   for( size_t i=0UL; i<N; ++i ) {
      b[i] = rand<double>();
      c[i] = rand<double>();
      d[i] = rand<double>();
   }

   // ...
   \endcode

// This simple modification makes a huge difference on ccNUMA in memory-bound situations (as for
// instance in all BLAS level 1 operations and partially BLAS level 2 operations). Therefore, in
// order to achieve the maximum possible performance, it is imperative to initialize the memory
// according to the later use of the data structures.
//
//
// \n \section openmp_limitations Limitations of the OpenMP Parallelization
// <hr>
//
// There are a few important limitations to the current \b Blaze OpenMP parallelization. The first
// one involves the explicit use of an OpenMP parallel region (see \ref openmp_parallel), the
// other one the OpenMP \c sections directive (see \ref openmp_sections).
//
//
// \n \subsection openmp_parallel The Parallel Directive
//
// In OpenMP threads are explicitly spawned via an OpenMP parallel directive:

   \code
   // Serial region, executed by a single thread
   #pragma omp parallel
   {
      // Parallel region, executed by the specified number of threads
   }

   // Serial region, executed by a single thread
   \endcode

// Conceptually, the specified number of threads (see \ref openmp_setup) is created every time a
// parallel directive is encountered. Therefore, from a performance point of view, it seems to be
// beneficial to use a single OpenMP parallel directive for several operations:

   \code
   blaze::DynamicVector<double> x, y1, y2;
   blaze::DynamicMatrix<double> A, B;

   #pragma omp parallel
   {
      y1 = A * x;
      y2 = B * x;
   }
   \endcode

// Unfortunately, this optimization approach is not allowed within the \b Blaze library. More
// explicitly, it is not allowed to put an operation into a parallel region. The reason is that
// the entire code contained within a parallel region is executed by all threads.
// Although this
// appears to just comprise the contained computations, a computation (or more specifically the
// assignment of an expression to a vector or matrix) can contain additional logic that must not
// be handled by multiple threads (as for instance memory allocations, setup of temporaries, etc.).
// Therefore it is not possible to manually start a parallel region for several operations, but
// \b Blaze will spawn threads automatically, depending on the specifics of the operation at hand
// and the given operands.
//
// \n \subsection openmp_sections The Sections Directive
//
// OpenMP provides several work-sharing constructs to distribute work among threads. One of these
// constructs is the \c sections directive:

   \code
   blaze::DynamicVector<double> x, y1, y2;
   blaze::DynamicMatrix<double> A, B;

   // ... Resizing and initialization

   #pragma omp sections
   {
   #pragma omp section

      y1 = A * x;

   #pragma omp section

      y2 = B * x;

   }
   \endcode

// In this example, two threads are used to compute two distinct matrix/vector multiplications
// concurrently. Thereby each of the \c sections is executed by exactly one thread.
//
// Unfortunately \b Blaze does not support concurrent parallel computations and therefore this
// approach does not work with any of the \b Blaze parallelization techniques. All techniques
// (including the C++11 and Boost thread parallelizations; see \ref cpp_threads_parallelization
// and \ref boost_threads_parallelization) are optimized for the parallel computation of an
// operation within a single thread of execution. This means that \b Blaze tries to use all
// available threads to compute the result of a single operation as efficiently as possible.
// Therefore, for this special case, it is advisable to disable all \b Blaze parallelizations
// and to let \b Blaze compute all operations within a \c sections directive in serial. This can
// be done by either completely disabling the \b Blaze parallelization (see \ref serial_execution)
// or by selectively serializing all operations within a \c sections directive via the \c serial()
// function:

   \code
   blaze::DynamicVector<double> x, y1, y2;
   blaze::DynamicMatrix<double> A, B;

   // ... Resizing and initialization

   #pragma omp sections
   {
   #pragma omp section

      y1 = serial( A * x );

   #pragma omp section

      y2 = serial( B * x );

   }
   \endcode

// Please note that the use of the \c BLAZE_SERIAL_SECTION (see also \ref serial_execution) does
// NOT work in this context!
//
// \n Previous: \ref shared_memory_parallelization &nbsp; &nbsp; Next: \ref cpp_threads_parallelization
*/
//*************************************************************************************************


//**C++11 Thread Parallelization*******************************************************************
/*!\page cpp_threads_parallelization C++11 Thread Parallelization
//
// \tableofcontents
//
//
// In addition to the OpenMP-based shared memory parallelization, starting with \b Blaze 2.1,
// \b Blaze also provides a shared memory parallelization based on C++11 threads.
//
//
// \n \section cpp_threads_setup C++11 Thread Setup
// <hr>
//
// In order to enable the C++11 thread-based parallelization, first the according C++11-specific
// compiler flags have to be used and second the \c BLAZE_USE_CPP_THREADS command line argument
// has to be explicitly specified. For instance, in case of the GNU C++ and Clang compilers the
// compiler flags have to be extended by

   \code
   ... -std=c++11 -DBLAZE_USE_CPP_THREADS ...
   \endcode

// This simple action will cause the \b Blaze library to automatically try to run all operations
// in parallel with the specified number of C++11 threads. Note that in case both OpenMP and C++11
// threads are enabled on the command line, the OpenMP-based parallelization has priority and
// is preferred.
//
// The number of threads can be either specified via the environment variable \c BLAZE_NUM_THREADS

   \code
   export BLAZE_NUM_THREADS=4  // Unix systems
   set BLAZE_NUM_THREADS=4     // Windows systems
   \endcode

// or alternatively via the \c setNumThreads() function provided by the \b Blaze library:

   \code
   blaze::setNumThreads( 4 );
   \endcode

// Please note that the \b Blaze library does not limit the available number of threads. Therefore
// it is YOUR responsibility to choose an appropriate number of threads. The best performance,
// though, can be expected if the specified number of threads matches the available number of
// cores.
//
// In order to query the number of threads used for the parallelization of operations, the
// \c getNumThreads() function can be used:

   \code
   const size_t threads = blaze::getNumThreads();
   \endcode

// In the context of C++11 threads, the function will return the previously specified number of
// threads.
//
//
// \n \section cpp_threads_configuration C++11 Thread Configuration
// <hr>
//
// As in case of the OpenMP-based parallelization \b Blaze is not unconditionally running an
// operation in parallel. In case \b Blaze deems the parallel execution as counterproductive for
// the overall performance, the operation is executed serially. One of the main reasons for not
// executing an operation in parallel is the size of the operands. For instance, a vector addition
// is only executed in parallel if the size of both vector operands exceeds a certain threshold.
// Otherwise, the performance could seriously decrease due to the overhead caused by the thread
// setup. However, in order to be able to adjust the \b Blaze library to a specific system, it
// is possible to configure these thresholds manually. All thresholds are contained within the
// configuration file <tt>./blaze/config/Thresholds.h</tt>.
//
// Please note that these thresholds are highly sensitive to the used system architecture and
// the shared memory parallelization technique. Therefore the default values cannot guarantee
// maximum performance for all possible situations and configurations. They merely provide a
// reasonable standard for the current CPU generation. Also note that the provided defaults
// have been determined using the OpenMP parallelization and require individual adaptation for
// the C++11 thread parallelization.
//
//
// \n \section cpp_threads_known_issues Known Issues
// <hr>
//
// There is a known issue in Visual Studio 2012 and 2013 that may cause C++11 threads to hang
// if their destructor is executed after the \c main() function:
//
//    http://connect.microsoft.com/VisualStudio/feedback/details/747145
//
// Unfortunately, the C++11 parallelization of the \b Blaze library is affected by this bug.
// In order to circumvent this problem, \b Blaze provides the \c shutDownThreads() function,
// which can be used to manually destroy all threads at the end of the \c main() function:

   \code
   int main()
   {
      // ... Using the C++11 thread parallelization of Blaze

      shutDownThreads();
   }
   \endcode

// Please note that this function may only be used at the end of the \c main() function. After
// this function no further computation may be executed!
// Also note that this function has an
// effect for Visual Studio compilers only and doesn't need to be used with any other compiler.
//
// \n Previous: \ref openmp_parallelization &nbsp; &nbsp; Next: \ref boost_threads_parallelization
*/
//*************************************************************************************************


//**Boost Thread Parallelization*******************************************************************
/*!\page boost_threads_parallelization Boost Thread Parallelization
//
// \tableofcontents
//
//
// The third available shared memory parallelization provided with \b Blaze is based on Boost
// threads.
//
//
// \n \section boost_threads_setup Boost Thread Setup
// <hr>
//
// In order to enable the Boost thread-based parallelization, two steps have to be taken: First,
// the \c BLAZE_USE_BOOST_THREADS command line argument has to be explicitly specified during
// compilation:

   \code
   ... -DBLAZE_USE_BOOST_THREADS ...
   \endcode

// Second, the according Boost libraries have to be linked. These two simple actions will cause
// the \b Blaze library to automatically try to run all operations in parallel with the specified
// number of Boost threads. Note that the OpenMP-based and C++11 thread-based parallelizations
// have priority, i.e. are preferred in case either is enabled in combination with the Boost
// thread parallelization.
//
// The number of threads can be either specified via the environment variable \c BLAZE_NUM_THREADS

   \code
   export BLAZE_NUM_THREADS=4  // Unix systems
   set BLAZE_NUM_THREADS=4     // Windows systems
   \endcode

// or alternatively via the \c setNumThreads() function provided by the \b Blaze library:

   \code
   blaze::setNumThreads( 4 );
   \endcode

// Please note that the \b Blaze library does not limit the available number of threads. Therefore
// it is YOUR responsibility to choose an appropriate number of threads. The best performance,
// though, can be expected if the specified number of threads matches the available number of
// cores.
//
// In order to query the number of threads used for the parallelization of operations, the
// \c getNumThreads() function can be used:

   \code
   const size_t threads = blaze::getNumThreads();
   \endcode

// In the context of Boost threads, the function will return the previously specified number of
// threads.
//
//
// \n \section boost_threads_configuration Boost Thread Configuration
// <hr>
//
// As in case of the other shared memory parallelizations \b Blaze is not unconditionally running
// an operation in parallel (see \ref openmp_parallelization or \ref cpp_threads_parallelization).
// All thresholds related to the Boost thread parallelization are also contained within the
// configuration file <tt>./blaze/config/Thresholds.h</tt>.
//
// Please note that these thresholds are highly sensitive to the used system architecture and
// the shared memory parallelization technique. Therefore the default values cannot guarantee
// maximum performance for all possible situations and configurations. They merely provide a
// reasonable standard for the current CPU generation. Also note that the provided defaults
// have been determined using the OpenMP parallelization and require individual adaptation for
// the Boost thread parallelization.
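// As an illustration, adjusting a threshold merely amounts to changing a compile time constant
// in <tt>./blaze/config/Thresholds.h</tt>. The following excerpt is only a sketch; the actual
// constant names and default values depend on the \b Blaze version at hand:

   \code
   // Hypothetical excerpt from ./blaze/config/Thresholds.h: dense vector additions on
   // vectors smaller than this size are executed serially, larger ones in parallel.
   const size_t SMP_DVECDVECADD_THRESHOLD = 38000UL;
   \endcode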
//
// \n Previous: \ref cpp_threads_parallelization &nbsp; &nbsp; Next: \ref serial_execution
*/
//*************************************************************************************************


//**Serial Execution*******************************************************************************
/*!\page serial_execution Serial Execution
//
// Sometimes it may be necessary to enforce the serial execution of specific operations. For this
// purpose, the \b Blaze library offers three possible options: the serialization of a single
// expression via the \c serial() function, the serialization of a block of expressions via the
// \c BLAZE_SERIAL_SECTION, and the general deactivation of the parallel execution.
//
//
// \n \section serial_execution_serial_expression Option 1: Serialization of a Single Expression
// <hr>
//
// The first option is the serialization of a specific operation via the \c serial() function:

   \code
   blaze::DynamicMatrix<double> A, B, C;

   // ... Resizing and initialization

   C = serial( A + B );
   \endcode

// \c serial() enforces the serial evaluation of the enclosed expression. It can be used on any
// kind of dense or sparse vector or matrix expression.
//
//
// \n \section serial_execution_serial_section Option 2: Serialization of Multiple Expressions
// <hr>
//
// The second option is the temporary and local enforcement of a serial execution via the
// \c BLAZE_SERIAL_SECTION:

   \code
   using blaze::rowMajor;
   using blaze::columnVector;

   blaze::DynamicMatrix<double,rowMajor> A;
   blaze::DynamicVector<double,columnVector> b, c, d, x, y, z;

   // ... Resizing and initialization

   // Parallel execution
   // If possible and beneficial for performance the following operation is executed in parallel.
   x = A * b;

   // Serial execution
   // All operations executed within the serial section are guaranteed to be executed in
   // serial (even if a parallel execution would be possible and/or beneficial).
   BLAZE_SERIAL_SECTION
   {
      y = A * c;
      z = A * d;
   }

   // Parallel execution continued
   // ...
   \endcode

// Within the scope of the \c BLAZE_SERIAL_SECTION, all operations are guaranteed to run in serial.
// Outside the scope of the serial section, all operations are run in parallel (if beneficial for
// the performance).
//
// Note that the \c BLAZE_SERIAL_SECTION must only be used within a single thread of execution.
// The use of the serial section within several concurrent threads will result in undefined
// behavior!
//
//
// \n \section serial_execution_deactivate_parallelism Option 3: Deactivation of Parallel Execution
// <hr>
//
// The third option is the general deactivation of the parallel execution (even in case OpenMP is
// enabled on the command line). This can be achieved via the \c BLAZE_USE_SHARED_MEMORY_PARALLELIZATION
// switch in the <tt>./blaze/config/SMP.h</tt> configuration file:

   \code
   #define BLAZE_USE_SHARED_MEMORY_PARALLELIZATION 1
   \endcode

// In case the \c BLAZE_USE_SHARED_MEMORY_PARALLELIZATION switch is set to 0, the shared memory
// parallelization is deactivated altogether.
//
// \n Previous: \ref boost_threads_parallelization &nbsp; &nbsp; Next: \ref serialization
*/
//*************************************************************************************************


//**Serialization**********************************************************************************
/*!\page serialization Serialization
//
// Sometimes it is necessary to store vectors and/or matrices on disk, for instance for storing
// results or for sharing specific setups with other people.
// The \b Blaze math serialization
// module provides the according functionality to create platform independent, portable, binary
// representations of vectors and matrices that can be used to store the \b Blaze data structures
// without loss of precision and to reliably transfer them from one machine to another.
//
// The following two pages explain how to serialize vectors and matrices:
//
//  - \ref vector_serialization
//  - \ref matrix_serialization
//
// \n Previous: \ref serial_execution &nbsp; &nbsp; Next: \ref vector_serialization
*/
//*************************************************************************************************


//**Vector Serialization***************************************************************************
/*!\page vector_serialization Vector Serialization
//
// The following example demonstrates the (de-)serialization of dense and sparse vectors:

   \code
   using blaze::columnVector;
   using blaze::rowVector;

   // Serialization of both vectors
   {
      blaze::StaticVector<double,5UL,rowVector> d;
      blaze::CompressedVector<int,columnVector> s;

      // ... Resizing and initialization

      // Creating an archive that writes into the file "vectors.blaze"
      blaze::Archive<std::ofstream> archive( "vectors.blaze" );

      // Serialization of both vectors into the same archive. Note that d lies before s!
      archive << d << s;
   }

   // Reconstitution of both vectors
   {
      blaze::DynamicVector<double,rowVector> d1;
      blaze::DynamicVector<int,rowVector> d2;

      // Creating an archive that reads from the file "vectors.blaze"
      blaze::Archive<std::ifstream> archive( "vectors.blaze" );

      // Reconstituting the former d vector into d1. Note that it is possible to reconstitute
      // the vector into a different kind of vector (StaticVector -> DynamicVector), but that
      // the type of elements has to be the same.
      archive >> d1;

      // Reconstituting the former s vector into d2. Note that it is even possible to reconstitute
      // a sparse vector as a dense vector (also the reverse is possible) and that a column vector
      // can be reconstituted as row vector (and vice versa). Note however that also in this case
      // the type of elements is the same!
      archive >> d2;
   }
   \endcode

// The (de-)serialization of vectors is not restricted to vectors of built-in data type, but can
// also be used for vectors with vector or matrix element type:

   \code
   // Serialization
   {
      blaze::CompressedVector< blaze::DynamicVector< blaze::complex<double> > > vec;

      // ... Resizing and initialization

      // Creating an archive that writes into the file "vector.blaze"
      blaze::Archive<std::ofstream> archive( "vector.blaze" );

      // Serialization of the vector into the archive
      archive << vec;
   }

   // Deserialization
   {
      blaze::CompressedVector< blaze::DynamicVector< blaze::complex<double> > > vec;

      // Creating an archive that reads from the file "vector.blaze"
      blaze::Archive<std::ifstream> archive( "vector.blaze" );

      // Reconstitution of the vector from the archive
      archive >> vec;
   }
   \endcode

// As the examples demonstrate, the vector serialization offers enormous flexibility.
// However,
// several actions result in errors:
//
//  - vectors cannot be reconstituted as matrices (and vice versa)
//  - the element type of the serialized and reconstituted vector must match, which means
//    that on the source and destination platform the general type (signed/unsigned integral
//    or floating point) and the size of the type must be exactly the same
//  - when reconstituting a \c StaticVector, its size must match the size of the serialized vector
//
// In case an error is encountered during (de-)serialization, a \c std::runtime_error is
// thrown.
//
// \n Previous: \ref serialization &nbsp; &nbsp; Next: \ref matrix_serialization
*/
//*************************************************************************************************


//**Matrix Serialization***************************************************************************
/*!\page matrix_serialization Matrix Serialization
//
// The serialization of matrices works in the same manner as the serialization of vectors. The
// following example demonstrates the (de-)serialization of dense and sparse matrices:

   \code
   using blaze::rowMajor;
   using blaze::columnMajor;

   // Serialization of both matrices
   {
      blaze::StaticMatrix<double,3UL,5UL,rowMajor> D;
      blaze::CompressedMatrix<int,columnMajor> S;

      // ... Resizing and initialization

      // Creating an archive that writes into the file "matrices.blaze"
      blaze::Archive<std::ofstream> archive( "matrices.blaze" );

      // Serialization of both matrices into the same archive. Note that D lies before S!
      archive << D << S;
   }

   // Reconstitution of both matrices
   {
      blaze::DynamicMatrix<double,rowMajor> D1;
      blaze::DynamicMatrix<int,rowMajor> D2;

      // Creating an archive that reads from the file "matrices.blaze"
      blaze::Archive<std::ifstream> archive( "matrices.blaze" );

      // Reconstituting the former D matrix into D1. Note that it is possible to reconstitute
      // the matrix into a different kind of matrix (StaticMatrix -> DynamicMatrix), but that
      // the type of elements has to be the same.
      archive >> D1;

      // Reconstituting the former S matrix into D2. Note that it is even possible to reconstitute
      // a sparse matrix as a dense matrix (also the reverse is possible) and that a column-major
      // matrix can be reconstituted as row-major matrix (and vice versa). Note however that also
      // in this case the type of elements is the same!
      archive >> D2;
   }
   \endcode

// Note that also in case of matrices it is possible to (de-)serialize matrices with vector or
// matrix elements:

   \code
   // Serialization
   {
      blaze::CompressedMatrix< blaze::DynamicMatrix< blaze::complex<double> > > mat;

      // ... Resizing and initialization
      // Creating an archive that writes into the file "matrix.blaze"
      blaze::Archive<std::ofstream> archive( "matrix.blaze" );

      // Serialization of the matrix into the archive
      archive << mat;
   }

   // Deserialization
   {
      blaze::CompressedMatrix< blaze::DynamicMatrix< blaze::complex<double> > > mat;

      // Creating an archive that reads from the file "matrix.blaze"
      blaze::Archive<std::ifstream> archive( "matrix.blaze" );

      // Reconstitution of the matrix from the archive
      archive >> mat;
   }
   \endcode

// Note that just as the vector serialization, the matrix serialization is restricted by a
// few important rules:
//
//  - matrices cannot be reconstituted as vectors (and vice versa)
//  - the element type of the serialized and reconstituted matrix must match, which means
//    that on the source and destination platform the general type (signed/unsigned integral
//    or floating point) and the size of the type must be exactly the same
//  - when reconstituting a \c StaticMatrix, the number of rows and columns must match those
//    of the serialized matrix
//
// In case an error is encountered during (de-)serialization, a \c std::runtime_error is
// thrown.
//
// \n Previous: \ref vector_serialization &nbsp; &nbsp; Next: \ref blas_functions \n
*/
//*************************************************************************************************


//**BLAS Functions*********************************************************************************
/*!\page blas_functions BLAS Functions
//
// \tableofcontents
//
//
// For vector/vector, matrix/vector and matrix/matrix multiplications with large dense matrices
// \b Blaze relies on the efficiency of BLAS libraries. For this purpose, \b Blaze implements
// several convenient C++ wrapper functions for several BLAS functions. The following sections
// give a complete overview of all available BLAS level 1, 2 and 3 functions.
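// For instance (a brief usage sketch based on the high-level wrapper overloads listed in the
// following sections), the wrappers can be called directly with \b Blaze vectors and matrices:

   \code
   blaze::DynamicVector<double> x( 100UL ), y( 100UL );

   // ... Initialization of the vectors

   const double d = blaze::dotu( x, y );  // Dot product via the high-level BLAS wrapper
   \endcode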
// // // \n \section blas_level_1 BLAS Level 1 // <hr> // // \subsection blas_level_1_dotu Dot Product (dotu) // // The following wrapper functions provide a generic interface for the BLAS functions for the // dot product of two dense vectors (\c sdot(), \c ddot(), \c cdotu_sub(), and \c zdotu_sub()): \code namespace blaze { float dotu( int n, const float* x, int incX, const float* y, int incY ); double dotu( int n, const double* x, int incX, const double* y, int incY ); complex<float> dotu( int n, const complex<float>* x, int incX, const complex<float>* y, int incY ); complex<double> dotu( int n, const complex<double>* x, int incX, const complex<double>* y, int incY ); template< typename VT1, bool TF1, typename VT2, bool TF2 > ElementType_<VT1> dotu( const DenseVector<VT1,TF1>& x, const DenseVector<VT2,TF2>& y ); } // namespace blaze \endcode // \subsection blas_level_1_dotc Complex Conjugate Dot Product (dotc) // // The following wrapper functions provide a generic interface for the BLAS functions for the // complex conjugate dot product of two dense vectors (\c sdot(), \c ddot(), \c cdotc_sub(), // and \c zdotc_sub()): \code namespace blaze { float dotc( int n, const float* x, int incX, const float* y, int incY ); double dotc( int n, const double* x, int incX, const double* y, int incY ); complex<float> dotc( int n, const complex<float>* x, int incX, const complex<float>* y, int incY ); complex<double> dotc( int n, const complex<double>* x, int incX, const complex<double>* y, int incY ); template< typename VT1, bool TF1, typename VT2, bool TF2 > ElementType_<VT1> dotc( const DenseVector<VT1,TF1>& x, const DenseVector<VT2,TF2>& y ); } // namespace blaze \endcode // \subsection blas_level_1_axpy Axpy Product (axpy) // // The following wrapper functions provide a generic interface for the BLAS functions for the // axpy product of two dense vectors (\c saxpy(), \c daxpy(), \c caxpy(), and \c zaxpy()): \code namespace blaze { void axpy( int n, float alpha, const float* x, int incX, float* y, int incY ); void axpy( int n, double alpha, const double* x, int incX, double* y, int incY ); void axpy( int n, complex<float> alpha, const complex<float>* x, int incX, complex<float>* y, int incY ); void axpy( int n, complex<double> alpha, const complex<double>* x, int incX, complex<double>* y, int incY ); template< typename VT1, bool TF1, typename VT2, bool TF2, typename ST > void axpy( const DenseVector<VT1,TF1>& x, const DenseVector<VT2,TF2>& y, ST alpha ); } // namespace blaze \endcode // \n \section blas_level_2 BLAS Level 2 // <hr> // // \subsection blas_level_2_gemv General Matrix/Vector Multiplication (gemv) // // The following wrapper functions provide a generic interface for the BLAS functions for the // general matrix/vector multiplication (\c sgemv(), \c dgemv(), \c cgemv(), and \c zgemv()): \code namespace blaze { void gemv( CBLAS_ORDER layout, CBLAS_TRANSPOSE transA, int m, int n, float alpha, const float* A, int lda, const float* x, int incX, float beta, float* y, int incY ); void gemv( CBLAS_ORDER layout, CBLAS_TRANSPOSE transA, int m, int n, double alpha, const double* A, int lda, const double* x, int incX, double beta, double* y, int incY ); void gemv( CBLAS_ORDER layout, CBLAS_TRANSPOSE transA, int m, int n, complex<float> alpha, const complex<float>* A, int lda, const complex<float>* x, int incX, complex<float> beta, complex<float>* y, int incY ); void gemv( CBLAS_ORDER layout, CBLAS_TRANSPOSE transA, int m, int n, complex<double> alpha, const complex<double>* A, int lda, const 
   complex<double>* x, int incX, complex<double> beta, complex<double>* y, int incY );

   template< typename VT1, typename MT1, bool SO, typename VT2, typename ST >
   void gemv( DenseVector<VT1,false>& y, const DenseMatrix<MT1,SO>& A,
              const DenseVector<VT2,false>& x, ST alpha, ST beta );

   template< typename VT1, typename VT2, typename MT1, bool SO, typename ST >
   void gemv( DenseVector<VT1,true>& y, const DenseVector<VT2,true>& x,
              const DenseMatrix<MT1,SO>& A, ST alpha, ST beta );

   } // namespace blaze
   \endcode

// \n \subsection blas_level_2_trmv Triangular Matrix/Vector Multiplication (trmv)
//
// The following wrapper functions provide a generic interface for the BLAS functions for the
// matrix/vector multiplication with a triangular matrix (\c strmv(), \c dtrmv(), \c ctrmv(),
// and \c ztrmv()):

   \code
   namespace blaze {

   void trmv( CBLAS_ORDER order, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag,
              int n, const float* A, int lda, float* x, int incX );

   void trmv( CBLAS_ORDER order, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag,
              int n, const double* A, int lda, double* x, int incX );

   void trmv( CBLAS_ORDER order, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag,
              int n, const complex<float>* A, int lda, complex<float>* x, int incX );

   void trmv( CBLAS_ORDER order, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag,
              int n, const complex<double>* A, int lda, complex<double>* x, int incX );

   template< typename VT, typename MT, bool SO >
   void trmv( DenseVector<VT,false>& x, const DenseMatrix<MT,SO>& A, CBLAS_UPLO uplo );

   template< typename VT, typename MT, bool SO >
   void trmv( DenseVector<VT,true>& x, const DenseMatrix<MT,SO>& A, CBLAS_UPLO uplo );

   } // namespace blaze
   \endcode

// \n \section blas_level_3 BLAS Level 3
// <hr>
//
// \subsection blas_level_3_gemm General Matrix/Matrix Multiplication (gemm)
//
// The following wrapper functions provide a generic interface for the BLAS functions for the
// general matrix/matrix multiplication (\c sgemm(), \c dgemm(), \c cgemm(), and \c zgemm()):

   \code
   namespace blaze {

   void gemm( CBLAS_ORDER order, CBLAS_TRANSPOSE transA, CBLAS_TRANSPOSE transB,
              int m, int n, int k, float alpha, const float* A, int lda,
              const float* B, int ldb, float beta, float* C, int ldc );

   void gemm( CBLAS_ORDER order, CBLAS_TRANSPOSE transA, CBLAS_TRANSPOSE transB,
              int m, int n, int k, double alpha, const double* A, int lda,
              const double* B, int ldb, double beta, double* C, int ldc );

   void gemm( CBLAS_ORDER order, CBLAS_TRANSPOSE transA, CBLAS_TRANSPOSE transB,
              int m, int n, int k, complex<float> alpha, const complex<float>* A, int lda,
              const complex<float>* B, int ldb, complex<float> beta, complex<float>* C, int ldc );

   void gemm( CBLAS_ORDER order, CBLAS_TRANSPOSE transA, CBLAS_TRANSPOSE transB,
              int m, int n, int k, complex<double> alpha, const complex<double>* A, int lda,
              const complex<double>* B, int ldb, complex<double> beta, complex<double>* C, int ldc );

   template< typename MT1, bool SO1, typename MT2, bool SO2, typename MT3, bool SO3, typename ST >
   void gemm( DenseMatrix<MT1,SO1>& C, const DenseMatrix<MT2,SO2>& A,
              const DenseMatrix<MT3,SO3>& B, ST alpha, ST beta );

   } // namespace blaze
   \endcode

// \n \subsection blas_level_3_trmm Triangular Matrix/Matrix Multiplication (trmm)
//
// The following wrapper functions provide a generic interface for the BLAS functions for the
// matrix/matrix multiplication with a triangular matrix (\c strmm(), \c dtrmm(), \c ctrmm(), and
// \c ztrmm()):

   \code
   namespace blaze {

   void trmm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo,
CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, int m, int n, float alpha, const float* A, int lda, float* B, int ldb ); void trmm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, int m, int n, double alpha, const double* A, int lda, double* B, int ldb ); void trmm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, int m, int n, complex<float> alpha, const complex<float>* A, int lda, complex<float>* B, int ldb ); void trmm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, int m, int n, complex<double> alpha, const complex<double>* A, int lda, complex<double>* B, int ldb ); template< typename MT1, bool SO1, typename MT2, bool SO2, typename ST > void trmm( DenseMatrix<MT1,SO1>& B, const DenseMatrix<MT2,SO2>& A, CBLAS_SIDE side, CBLAS_UPLO uplo, ST alpha ); } // namespace blaze \endcode // \n \subsection blas_level_3_trsm Triangular System Solver (trsm) // // The following wrapper functions provide a generic interface for the BLAS functions for solving // a triangular system of equations (\c strsm(), \c dtrsm(), \c ctrsm(), and \c ztrsm()): \code namespace blaze { void trsm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, int m, int n, float alpha, const float* A, int lda, float* B, int ldb ); void trsm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, int m, int n, double alpha, const double* A, int lda, double* B, int ldb ); void trsm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, int m, int n, complex<float> alpha, const complex<float>* A, int lda, complex<float>* B, int ldb ); void trsm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, int m, int n, complex<double> alpha, const complex<double>* A, int lda, complex<double>* B, int ldb ); template< typename MT, bool SO, typename VT, bool TF, typename ST > void trsm( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, CBLAS_SIDE side, CBLAS_UPLO uplo, ST alpha ); template< typename MT1, bool SO1, typename MT2, bool SO2, typename ST > void trsm( const DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, CBLAS_SIDE side, CBLAS_UPLO uplo, ST alpha ); } // namespace blaze \endcode // \n Previous: \ref matrix_serialization &nbsp; &nbsp; Next: \ref lapack_functions \n */ //************************************************************************************************* //**LAPACK Functions******************************************************************************* /*!\page lapack_functions LAPACK Functions // // \tableofcontents // // // The \b Blaze library makes extensive use of the LAPACK functionality for various compute tasks // (including the decomposition, inversion and the computation of the determinant of dense matrices). // For this purpose, \b Blaze implements several convenient C++ wrapper functions for all required // LAPACK functions. The following sections give a complete overview of all available LAPACK wrapper // functions. For more details on the individual LAPACK functions see the \b Blaze function // documentation or the LAPACK online documentation browser: // // http://www.netlib.org/lapack/explore-html/ // // \note All functions only work for general, non-adapted matrices with \c float, \c double, // \c complex<float>, or \c complex<double> element type. 
The attempt to call the function with // adaptors or matrices of any other element type results in a compile time error! // // \note All functions can only be used if the fitting LAPACK library is available and linked to // the final executable. Otherwise a call to any of these functions will result in a linker error. // // \note For performance reasons all functions provide only the basic exception safety guarantee, // i.e. in case an exception is thrown the given matrix may already have been modified. // // // \n \section lapack_decomposition Matrix Decomposition // <hr> // // The following functions decompose/factorize the given dense matrix. Based on this decomposition // the matrix can be inverted or used to solve a linear system of equations. // // // \n \subsection lapack_lu_decomposition LU Decomposition // // The following functions provide an interface for the LAPACK functions \c sgetrf(), \c dgetrf(), // \c cgetrf(), and \c zgetrf(), which compute the LU decomposition for the given general matrix: \code namespace blaze { void getrf( int m, int n, float* A, int lda, int* ipiv, int* info ); void getrf( int m, int n, double* A, int lda, int* ipiv, int* info ); void getrf( int m, int n, complex<float>* A, int lda, int* ipiv, int* info ); void getrf( int m, int n, complex<double>* A, int lda, int* ipiv, int* info ); template< typename MT, bool SO > void getrf( DenseMatrix<MT,SO>& A, int* ipiv ); } // namespace blaze \endcode // The decomposition has the form \f[ A = P \cdot L \cdot U, \f] // where \c P is a permutation matrix, \c L is a lower unitriangular matrix, and \c U is an upper // triangular matrix. The resulting decomposition is stored within \a A: In case of a column-major // matrix, \c L is stored in the lower part of \a A and \c U is stored in the upper part. The unit // diagonal elements of \c L are not stored. In case \a A is a row-major matrix the result is // transposed. // // \note The LU decomposition will never fail, even for singular matrices. However, in case of a // singular matrix the resulting decomposition cannot be used for a matrix inversion or solving // a linear system of equations. // // // \n \subsection lapack_ldlt_decomposition LDLT Decomposition // // The following functions provide an interface for the LAPACK functions \c ssytrf(), \c dsytrf(), // \c csytrf(), and \c zsytrf(), which compute the LDLT (Bunch-Kaufman) decomposition for the given // symmetric indefinite matrix: \code namespace blaze { void sytrf( char uplo, int n, float* A, int lda, int* ipiv, float* work, int lwork, int* info ); void sytrf( char uplo, int n, double* A, int lda, int* ipiv, double* work, int lwork, int* info ); void sytrf( char uplo, int n, complex<float>* A, int lda, int* ipiv, complex<float>* work, int lwork, int* info ); void sytrf( char uplo, int n, complex<double>* A, int lda, int* ipiv, complex<double>* work, int lwork, int* info ); template< typename MT, bool SO > void sytrf( DenseMatrix<MT,SO>& A, char uplo, int* ipiv ); } // namespace blaze \endcode // The decomposition has the form \f[ A = U D U^{T} \texttt{ (if uplo = 'U'), or } A = L D L^{T} \texttt{ (if uplo = 'L'), } \f] // where \c U (or \c L) is a product of permutation and unit upper (lower) triangular matrices, // and \c D is symmetric and block diagonal with 1-by-1 and 2-by-2 diagonal blocks.
The resulting // decomposition is stored within \a A: In case \a uplo is set to \c 'L' the result is stored in // the lower part of the matrix and the upper part remains untouched, in case \a uplo is set to // \c 'U' the result is stored in the upper part and the lower part remains untouched. // // \note The Bunch-Kaufman decomposition will never fail, even for singular matrices. However, in // case of a singular matrix the resulting decomposition cannot be used for a matrix inversion or // solving a linear system of equations. // // // \n \subsection lapack_ldlh_decomposition LDLH Decomposition // // The following functions provide an interface for the LAPACK functions \c chetrf() and \c zhetrf(), // which compute the LDLH (Bunch-Kaufman) decomposition for the given Hermitian indefinite matrix: \code namespace blaze { void hetrf( char uplo, int n, complex<float>* A, int lda, int* ipiv, complex<float>* work, int lwork, int* info ); void hetrf( char uplo, int n, complex<double>* A, int lda, int* ipiv, complex<double>* work, int lwork, int* info ); template< typename MT, bool SO > void hetrf( DenseMatrix<MT,SO>& A, char uplo, int* ipiv ); } // namespace blaze \endcode // The decomposition has the form \f[ A = U D U^{H} \texttt{ (if uplo = 'U'), or } A = L D L^{H} \texttt{ (if uplo = 'L'), } \f] // where \c U (or \c L) is a product of permutation and unit upper (lower) triangular matrices, // and \c D is Hermitian and block diagonal with 1-by-1 and 2-by-2 diagonal blocks. The resulting // decomposition is stored within \a A: In case \a uplo is set to \c 'L' the result is stored in // the lower part of the matrix and the upper part remains untouched, in case \a uplo is set to // \c 'U' the result is stored in the upper part and the lower part remains untouched. // // \note The Bunch-Kaufman decomposition will never fail, even for singular matrices. However, in // case of a singular matrix the resulting decomposition cannot be used for a matrix inversion or // solving a linear system of equations. // // // \n \subsection lapack_llh_decomposition Cholesky Decomposition // // The following functions provide an interface for the LAPACK functions \c spotrf(), \c dpotrf(), // \c cpotrf(), and \c zpotrf(), which compute the Cholesky (LLH) decomposition for the given // positive definite matrix: \code namespace blaze { void potrf( char uplo, int n, float* A, int lda, int* info ); void potrf( char uplo, int n, double* A, int lda, int* info ); void potrf( char uplo, int n, complex<float>* A, int lda, int* info ); void potrf( char uplo, int n, complex<double>* A, int lda, int* info ); template< typename MT, bool SO > void potrf( DenseMatrix<MT,SO>& A, char uplo ); } // namespace blaze \endcode // The decomposition has the form \f[ A = U^{T} U \texttt{ (if uplo = 'U'), or } A = L L^{T} \texttt{ (if uplo = 'L'), } \f] // where \c U is an upper triangular matrix and \c L is a lower triangular matrix. The Cholesky // decomposition fails if the given matrix \a A is not a positive definite matrix. In this case // a \a std::invalid_argument exception is thrown.
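//
// As a brief usage illustration, the wrapper can be applied directly to a dense matrix (a minimal
// sketch, assuming a LAPACK library is linked and \c A has been initialized as a positive definite
// matrix):

\code
using blaze::DynamicMatrix;
using blaze::columnMajor;

DynamicMatrix<double,columnMajor> A( 32UL, 32UL );
// ... Initialization of the positive definite matrix A

potrf( A, 'L' );  // In-place Cholesky (LLH) decomposition of A
\endcode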
// // // \n \subsection lapack_qr_decomposition QR Decomposition // // The following functions provide an interface for the LAPACK functions \c sgeqrf(), \c dgeqrf(), // \c cgeqrf(), and \c zgeqrf(), which compute the QR decomposition of the given general matrix: \code namespace blaze { void geqrf( int m, int n, float* A, int lda, float* tau, float* work, int lwork, int* info ); void geqrf( int m, int n, double* A, int lda, double* tau, double* work, int lwork, int* info ); void geqrf( int m, int n, complex<float>* A, int lda, complex<float>* tau, complex<float>* work, int lwork, int* info ); void geqrf( int m, int n, complex<double>* A, int lda, complex<double>* tau, complex<double>* work, int lwork, int* info ); template< typename MT, bool SO > void geqrf( DenseMatrix<MT,SO>& A, typename MT::ElementType* tau ); } // namespace blaze \endcode // The decomposition has the form \f[ A = Q \cdot R, \f] // where the \c Q is represented as a product of elementary reflectors \f[ Q = H(1) H(2) . . . H(k) \texttt{, with k = min(m,n).} \f] // Each H(i) has the form \f[ H(i) = I - tau \cdot v \cdot v^T, \f] // where \c tau is a real scalar, and \c v is a real vector with <tt>v(0:i-1) = 0</tt> and // <tt>v(i) = 1</tt>. <tt>v(i+1:m)</tt> is stored on exit in <tt>A(i+1:m,i)</tt>, and \c tau // in \c tau(i). Thus on exit the elements on and above the diagonal of the matrix contain the // min(\a m,\a n)-by-\a n upper trapezoidal matrix \c R (\c R is upper triangular if \a m >= \a n); // the elements below the diagonal, with the array \c tau, represent the orthogonal matrix \c Q as // a product of min(\a m,\a n) elementary reflectors. // // The following functions provide an interface for the LAPACK functions \c sorgqr(), \c dorgqr(), // \c cungqr(), and \c zungqr(), which reconstruct the \c Q matrix from a QR decomposition: \code namespace blaze { void orgqr( int m, int n, int k, float* A, int lda, const float* tau, float* work, int lwork, int* info ); void orgqr( int m, int n, int k, double* A, int lda, const double* tau, double* work, int lwork, int* info ); void ungqr( int m, int n, int k, complex<float>* A, int lda, const complex<float>* tau, complex<float>* work, int lwork, int* info ); void ungqr( int m, int n, int k, complex<double>* A, int lda, const complex<double>* tau, complex<double>* work, int lwork, int* info ); template< typename MT, bool SO > void orgqr( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau ); template< typename MT, bool SO > void ungqr( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau ); } // namespace blaze \endcode // The following functions provide an interface for the LAPACK functions \c sormqr(), \c dormqr(), // \c cunmqr(), and \c zunmqr(), which can be used to multiply a matrix with the \c Q matrix from // a QR decomposition: \code namespace blaze { void ormqr( char side, char trans, int m, int n, int k, const float* A, int lda, const float* tau, float* C, int ldc, float* work, int lwork, int* info ); void ormqr( char side, char trans, int m, int n, int k, const double* A, int lda, const double* tau, double* C, int ldc, double* work, int lwork, int* info ); void unmqr( char side, char trans, int m, int n, int k, const complex<float>* A, int lda, const complex<float>* tau, complex<float>* C, int ldc, complex<float>* work, int lwork, int* info ); void unmqr( char side, char trans, int m, int n, int k, const complex<double>* A, int lda, const complex<double>* tau, complex<double>* C, int ldc, complex<double>* work, int lwork, int* info ); template<
typename MT1, bool SO1, typename MT2, bool SO2 > void ormqr( DenseMatrix<MT1,SO1>& C, const DenseMatrix<MT2,SO2>& A, char side, char trans, const ElementType_<MT2>* tau ); template< typename MT1, bool SO, typename MT2 > void unmqr( DenseMatrix<MT1,SO>& C, DenseMatrix<MT2,SO>& A, char side, char trans, ElementType_<MT2>* tau ); } // namespace blaze \endcode // \n \subsection lapack_rq_decomposition RQ Decomposition // // The following functions provide an interface for the LAPACK functions \c sgerqf(), \c dgerqf(), // \c cgerqf(), and \c zgerqf(), which compute the RQ decomposition of the given general matrix: \code namespace blaze { void gerqf( int m, int n, float* A, int lda, float* tau, float* work, int lwork, int* info ); void gerqf( int m, int n, double* A, int lda, double* tau, double* work, int lwork, int* info ); void gerqf( int m, int n, complex<float>* A, int lda, complex<float>* tau, complex<float>* work, int lwork, int* info ); void gerqf( int m, int n, complex<double>* A, int lda, complex<double>* tau, complex<double>* work, int lwork, int* info ); template< typename MT, bool SO > void gerqf( DenseMatrix<MT,SO>& A, typename MT::ElementType* tau ); } // namespace blaze \endcode // The decomposition has the form \f[ A = R \cdot Q, \f] // where the \c Q is represented as a product of elementary reflectors \f[ Q = H(1) H(2) . . . H(k) \texttt{, with k = min(m,n).} \f] // Each H(i) has the form \f[ H(i) = I - tau \cdot v \cdot v^T, \f] // where \c tau is a real scalar, and \c v is a real vector with <tt>v(n-k+i+1:n) = 0</tt> and // <tt>v(n-k+i) = 1</tt>. <tt>v(1:n-k+i-1)</tt> is stored on exit in <tt>A(m-k+i,1:n-k+i-1)</tt>, // and \c tau in \c tau(i). Thus in case \a m <= \a n, the upper triangle of the subarray // <tt>A(1:m,n-m+1:n)</tt> contains the \a m-by-\a m upper triangular matrix \c R and in case // \a m >= \a n, the elements on and above the (\a m-\a n)-th subdiagonal contain the \a m-by-\a n // upper trapezoidal matrix \c R; the remaining elements in combination with the array \c tau // represent the orthogonal matrix \c Q as a product of min(\a m,\a n) elementary reflectors. 
// // The following functions provide an interface for the LAPACK functions \c sorgrq(), \c dorgrq(), // \c cungrq(), and \c zungrq(), which reconstruct the \c Q matrix from an RQ decomposition: \code namespace blaze { void orgrq( int m, int n, int k, float* A, int lda, const float* tau, float* work, int lwork, int* info ); void orgrq( int m, int n, int k, double* A, int lda, const double* tau, double* work, int lwork, int* info ); void ungrq( int m, int n, int k, complex<float>* A, int lda, const complex<float>* tau, complex<float>* work, int lwork, int* info ); void ungrq( int m, int n, int k, complex<double>* A, int lda, const complex<double>* tau, complex<double>* work, int lwork, int* info ); template< typename MT, bool SO > void orgrq( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau ); template< typename MT, bool SO > void ungrq( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau ); } // namespace blaze \endcode // The following functions provide an interface for the LAPACK functions \c sormrq(), \c dormrq(), // \c cunmrq(), and \c zunmrq(), which can be used to multiply a matrix with the \c Q matrix from // an RQ decomposition: \code namespace blaze { void ormrq( char side, char trans, int m, int n, int k, const float* A, int lda, const float* tau, float* C, int ldc, float* work, int lwork, int* info ); void ormrq( char side, char trans, int m, int n, int k, const double* A, int lda, const double* tau, double* C, int ldc, double* work, int lwork, int* info ); void unmrq( char side, char trans, int m, int n, int k, const complex<float>* A, int lda, const complex<float>* tau, complex<float>* C, int ldc, complex<float>* work, int lwork, int* info ); void unmrq( char side, char trans, int m, int n, int k, const complex<double>* A, int lda, const complex<double>* tau, complex<double>* C, int ldc, complex<double>* work, int lwork, int* info ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void ormrq( DenseMatrix<MT1,SO1>& C, const DenseMatrix<MT2,SO2>& A, char side, char trans, const ElementType_<MT2>* tau ); template< typename MT1, bool SO, typename MT2 > void unmrq( DenseMatrix<MT1,SO>& C, DenseMatrix<MT2,SO>& A, char side, char trans, ElementType_<MT2>* tau ); } // namespace blaze \endcode // \n \subsection lapack_ql_decomposition QL Decomposition // // The following functions provide an interface for the LAPACK functions \c sgeqlf(), \c dgeqlf(), // \c cgeqlf(), and \c zgeqlf(), which compute the QL decomposition of the given general matrix: \code namespace blaze { void geqlf( int m, int n, float* A, int lda, float* tau, float* work, int lwork, int* info ); void geqlf( int m, int n, double* A, int lda, double* tau, double* work, int lwork, int* info ); void geqlf( int m, int n, complex<float>* A, int lda, complex<float>* tau, complex<float>* work, int lwork, int* info ); void geqlf( int m, int n, complex<double>* A, int lda, complex<double>* tau, complex<double>* work, int lwork, int* info ); template< typename MT, bool SO > void geqlf( DenseMatrix<MT,SO>& A, typename MT::ElementType* tau ); } // namespace blaze \endcode // The decomposition has the form \f[ A = Q \cdot L, \f] // where the \c Q is represented as a product of elementary reflectors \f[ Q = H(k) . . . H(2) H(1) \texttt{, with k = min(m,n).} \f] // Each H(i) has the form \f[ H(i) = I - tau \cdot v \cdot v^T, \f] // where \c tau is a real scalar, and \c v is a real vector with <tt>v(m-k+i+1:m) = 0</tt> and // <tt>v(m-k+i) = 1</tt>.
<tt>v(1:m-k+i-1)</tt> is stored on exit in <tt>A(1:m-k+i-1,n-k+i)</tt>, // and \c tau in \c tau(i). Thus in case \a m >= \a n, the lower triangle of the subarray // A(m-n+1:m,1:n) contains the \a n-by-\a n lower triangular matrix \c L and in case \a m <= \a n, // the elements on and below the (\a n-\a m)-th subdiagonal contain the \a m-by-\a n lower // trapezoidal matrix \c L; the remaining elements in combination with the array \c tau represent // the orthogonal matrix \c Q as a product of min(\a m,\a n) elementary reflectors. // // The following functions provide an interface for the LAPACK functions \c sorgql(), \c dorgql(), // \c cungql(), and \c zungql(), which reconstruct the \c Q matrix from a QL decomposition: \code namespace blaze { void orgql( int m, int n, int k, float* A, int lda, const float* tau, float* work, int lwork, int* info ); void orgql( int m, int n, int k, double* A, int lda, const double* tau, double* work, int lwork, int* info ); void ungql( int m, int n, int k, complex<float>* A, int lda, const complex<float>* tau, complex<float>* work, int lwork, int* info ); void ungql( int m, int n, int k, complex<double>* A, int lda, const complex<double>* tau, complex<double>* work, int lwork, int* info ); template< typename MT, bool SO > void orgql( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau ); template< typename MT, bool SO > void ungql( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau ); } // namespace blaze \endcode // The following functions provide an interface for the LAPACK functions \c sormql(), \c dormql(), // \c cunmql(), and \c zunmql(), which can be used to multiply a matrix with the \c Q matrix from // a QL decomposition: \code namespace blaze { void ormql( char side, char trans, int m, int n, int k, const float* A, int lda, const float* tau, float* C, int ldc, float* work, int lwork, int* info ); void ormql( char side, char trans, int m, int n, int k, const double* A, int lda, const double* tau, double* C, int ldc, double* work, int lwork, int* info ); void unmql( char side, char trans, int m, int n, int k, const complex<float>* A, int lda, const complex<float>* tau, complex<float>* C, int ldc, complex<float>* work, int lwork, int* info ); void unmql( char side, char trans, int m, int n, int k, const complex<double>* A, int lda, const complex<double>* tau, complex<double>* C, int ldc, complex<double>* work, int lwork, int* info ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void ormql( DenseMatrix<MT1,SO1>& C, const DenseMatrix<MT2,SO2>& A, char side, char trans, const ElementType_<MT2>* tau ); template< typename MT1, bool SO, typename MT2 > void unmql( DenseMatrix<MT1,SO>& C, DenseMatrix<MT2,SO>& A, char side, char trans, ElementType_<MT2>* tau ); } // namespace blaze \endcode // \n \subsection lapack_lq_decomposition LQ Decomposition // // The following functions provide an interface for the LAPACK functions \c sgelqf(), \c dgelqf(), // \c cgelqf(), and \c zgelqf(), which compute the LQ decomposition of the given general matrix: \code namespace blaze { void gelqf( int m, int n, float* A, int lda, float* tau, float* work, int lwork, int* info ); void gelqf( int m, int n, double* A, int lda, double* tau, double* work, int lwork, int* info ); void gelqf( int m, int n, complex<float>* A, int lda, complex<float>* tau, complex<float>* work, int lwork, int* info ); void gelqf( int m, int n, complex<double>* A, int lda, complex<double>* tau, complex<double>* work, int lwork, int* info ); template< typename MT, bool SO > void
gelqf( DenseMatrix<MT,SO>& A, typename MT::ElementType* tau ); } // namespace blaze \endcode // The decomposition has the form \f[ A = L \cdot Q, \f] // where the \c Q is represented as a product of elementary reflectors \f[ Q = H(k) . . . H(2) H(1) \texttt{, with k = min(m,n).} \f] // Each H(i) has the form \f[ H(i) = I - tau \cdot v \cdot v^T, \f] // where \c tau is a real scalar, and \c v is a real vector with <tt>v(0:i-1) = 0</tt> and // <tt>v(i) = 1</tt>. <tt>v(i+1:n)</tt> is stored on exit in <tt>A(i,i+1:n)</tt>, and \c tau // in \c tau(i). Thus on exit the elements on and below the diagonal of the matrix contain the // \a m-by-min(\a m,\a n) lower trapezoidal matrix \c L (\c L is lower triangular if \a m <= \a n); // the elements above the diagonal, with the array \c tau, represent the orthogonal matrix \c Q // as a product of min(\a m,\a n) elementary reflectors. // // The following functions provide an interface for the LAPACK functions \c sorglq(), \c dorglq(), // \c cunglq(), and \c zunglq(), which reconstruct the \c Q matrix from an LQ decomposition: \code namespace blaze { void orglq( int m, int n, int k, float* A, int lda, const float* tau, float* work, int lwork, int* info ); void orglq( int m, int n, int k, double* A, int lda, const double* tau, double* work, int lwork, int* info ); void unglq( int m, int n, int k, complex<float>* A, int lda, const complex<float>* tau, complex<float>* work, int lwork, int* info ); void unglq( int m, int n, int k, complex<double>* A, int lda, const complex<double>* tau, complex<double>* work, int lwork, int* info ); template< typename MT, bool SO > void orglq( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau ); template< typename MT, bool SO > void unglq( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau ); } // namespace blaze \endcode // The following functions provide an interface for the LAPACK functions \c sormlq(), \c dormlq(), // \c cunmlq(), and \c zunmlq(), which can be used to multiply a matrix with the \c Q matrix from // an LQ decomposition: \code namespace blaze { void ormlq( char side, char trans, int m, int n, int k, const float* A, int lda, const float* tau, float* C, int ldc, float* work, int lwork, int* info ); void ormlq( char side, char trans, int m, int n, int k, const double* A, int lda, const double* tau, double* C, int ldc, double* work, int lwork, int* info ); void unmlq( char side, char trans, int m, int n, int k, const complex<float>* A, int lda, const complex<float>* tau, complex<float>* C, int ldc, complex<float>* work, int lwork, int* info ); void unmlq( char side, char trans, int m, int n, int k, const complex<double>* A, int lda, const complex<double>* tau, complex<double>* C, int ldc, complex<double>* work, int lwork, int* info ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void ormlq( DenseMatrix<MT1,SO1>& C, const DenseMatrix<MT2,SO2>& A, char side, char trans, const ElementType_<MT2>* tau ); template< typename MT1, bool SO, typename MT2 > void unmlq( DenseMatrix<MT1,SO>& C, DenseMatrix<MT2,SO>& A, char side, char trans, ElementType_<MT2>* tau ); } // namespace blaze \endcode // \n \section lapack_inversion Matrix Inversion // <hr> // // Given a matrix that has already been decomposed, the following functions can be used to invert // the matrix in-place.
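//
// For illustration, the complete decompose-then-invert workflow for a general dense matrix could
// look as follows (a minimal sketch using the getrf() and getri() wrappers described below; the
// pivot array requires the <memory> header):

\code
using blaze::DynamicMatrix;
using blaze::columnMajor;

DynamicMatrix<double,columnMajor> A( 10UL, 10UL );
// ... Resizing and initialization of A

const std::unique_ptr<int[]> ipiv( new int[10UL] );  // Pivot indices, size >= min(m,n)

getrf( A, ipiv.get() );  // LU decomposition of A
getri( A, ipiv.get() );  // In-place inversion of A based on the LU decomposition
\endcode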
// // // \n \subsection lapack_lu_inversion LU-based Inversion // // The following functions provide an interface for the LAPACK functions \c sgetri(), \c dgetri(), // \c cgetri(), and \c zgetri(), which invert a general matrix that has already been decomposed by // an \ref lapack_lu_decomposition : \code namespace blaze { void getri( int n, float* A, int lda, const int* ipiv, float* work, int lwork, int* info ); void getri( int n, double* A, int lda, const int* ipiv, double* work, int lwork, int* info ); void getri( int n, complex<float>* A, int lda, const int* ipiv, complex<float>* work, int lwork, int* info ); void getri( int n, complex<double>* A, int lda, const int* ipiv, complex<double>* work, int lwork, int* info ); template< typename MT, bool SO > void getri( DenseMatrix<MT,SO>& A, const int* ipiv ); } // namespace blaze \endcode // The functions fail if ... // // - ... the given matrix is not a square matrix; // - ... the given matrix is singular and not invertible. // // The first four functions report failure via the \c info argument, the fifth function throws a // \a std::invalid_argument exception in case of an error. // // // \n \subsection lapack_ldlt_inversion LDLT-based Inversion // // The following functions provide an interface for the LAPACK functions \c ssytri(), \c dsytri(), // \c csytri(), and \c zsytri(), which invert a symmetric indefinite matrix that has already been // decomposed by an \ref lapack_ldlt_decomposition : \code namespace blaze { void sytri( char uplo, int n, float* A, int lda, const int* ipiv, float* work, int* info ); void sytri( char uplo, int n, double* A, int lda, const int* ipiv, double* work, int* info ); void sytri( char uplo, int n, complex<float>* A, int lda, const int* ipiv, complex<float>* work, int* info ); void sytri( char uplo, int n, complex<double>* A, int lda, const int* ipiv, complex<double>* work, int* info ); template< typename MT, bool SO > void sytri( DenseMatrix<MT,SO>& A, char uplo, const int* ipiv ); } // namespace blaze \endcode // The functions fail if ... // // - ... the given matrix is not a square matrix; // - ... the given matrix is singular and not invertible. // // The first four functions report failure via the \c info argument, the fifth function throws a // \a std::invalid_argument exception in case of an error. // // // \n \subsection lapack_ldlh_inversion LDLH-based Inversion // // The following functions provide an interface for the LAPACK functions \c chetri() and // \c zhetri(), which invert an Hermitian indefinite matrix that has already been decomposed by // an \ref lapack_ldlh_decomposition : \code namespace blaze { void hetri( char uplo, int n, complex<float>* A, int lda, const int* ipiv, complex<float>* work, int* info ); void hetri( char uplo, int n, complex<double>* A, int lda, const int* ipiv, complex<double>* work, int* info ); template< typename MT, bool SO > void hetri( DenseMatrix<MT,SO>& A, char uplo, const int* ipiv ); } // namespace blaze \endcode // The functions fail if ... // // - ... the given matrix is not a square matrix; // - ... the given matrix is singular and not invertible. // // The first two functions report failure via the \c info argument, the third function throws a // \a std::invalid_argument exception in case of an error.
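//
// As an illustration of the pattern shared by these inversion functions, a symmetric indefinite
// matrix can be decomposed via sytrf() and inverted in-place via sytri() (a minimal sketch; the
// pivot array requires the <memory> header):

\code
using blaze::DynamicMatrix;
using blaze::columnMajor;

DynamicMatrix<double,columnMajor> A( 10UL, 10UL );
// ... Initialization of the symmetric indefinite matrix A

const std::unique_ptr<int[]> ipiv( new int[10UL] );

sytrf( A, 'L', ipiv.get() );  // LDLT (Bunch-Kaufman) decomposition of A
sytri( A, 'L', ipiv.get() );  // In-place inversion based on the decomposition
\endcode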
// // // \n \subsection lapack_llh_inversion Cholesky-based Inversion // // The following functions provide an interface for the LAPACK functions \c spotri(), \c dpotri(), // \c cpotri(), and \c zpotri(), which invert a positive definite matrix that has already been // decomposed by an \ref lapack_llh_decomposition : \code namespace blaze { void potri( char uplo, int n, float* A, int lda, int* info ); void potri( char uplo, int n, double* A, int lda, int* info ); void potri( char uplo, int n, complex<float>* A, int lda, int* info ); void potri( char uplo, int n, complex<double>* A, int lda, int* info ); template< typename MT, bool SO > void potri( DenseMatrix<MT,SO>& A, char uplo ); } // namespace blaze \endcode // The functions fail if ... // // - ... the given matrix is not a square matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - ... the given matrix is singular and not invertible. // // The first four functions report failure via the \c info argument, the fifth function throws a // \a std::invalid_argument exception in case of an error. // // // \n \subsection lapack_triangular_inversion Inversion of Triangular Matrices // // The following functions provide an interface for the LAPACK functions \c strtri(), \c dtrtri(), // \c ctrtri(), and \c ztrtri(), which invert the given triangular matrix in-place: \code namespace blaze { void trtri( char uplo, char diag, int n, float* A, int lda, int* info ); void trtri( char uplo, char diag, int n, double* A, int lda, int* info ); void trtri( char uplo, char diag, int n, complex<float>* A, int lda, int* info ); void trtri( char uplo, char diag, int n, complex<double>* A, int lda, int* info ); template< typename MT, bool SO > void trtri( DenseMatrix<MT,SO>& A, char uplo, char diag ); } // namespace blaze \endcode // The functions fail if ... // // - ... the given matrix is not a square matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - ... the given \a diag argument is neither 'U' nor 'N'; // - ... the given matrix is singular and not invertible. // // The first four functions report failure via the \c info argument, the fifth function throws a // \a std::invalid_argument exception in case of an error. // // // \n \section lapack_substitution Substitution // <hr> // // Given a matrix that has already been decomposed, the following functions can be used to perform // the forward/backward substitution step to compute the solution to a system of linear equations. // Note that depending on the storage order of the system matrix and the given right-hand side the // functions solve different equation systems: // // Single right-hand side: // - \f$ A *x=b \f$ if \a A is column-major // - \f$ A^T*x=b \f$ if \a A is row-major // // Multiple right-hand sides: // - \f$ A *X =B \f$ if both \a A and \a B are column-major // - \f$ A^T*X =B \f$ if \a A is row-major and \a B is column-major // - \f$ A *X^T=B^T \f$ if \a A is column-major and \a B is row-major // - \f$ A^T*X^T=B^T \f$ if both \a A and \a B are row-major // // In this context the general system matrix \a A is an n-by-n matrix that has already been // factorized by the according decomposition function, \a x and \a b are n-dimensional vectors // and \a X and \a B are either row-major m-by-n matrices or column-major n-by-m matrices.
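//
// As an illustration of this convention, the following sketch solves \f$ A*x=b \f$ for a
// column-major system matrix by means of the getrf() and getrs() wrappers shown in the next
// subsection (with a row-major matrix the identical call would solve \f$ A^T*x=b \f$ instead):

\code
using blaze::DynamicMatrix;
using blaze::DynamicVector;
using blaze::columnMajor;

DynamicMatrix<double,columnMajor> A( 4UL, 4UL );
DynamicVector<double> b( 4UL );
// ... Initialization of A and b

const std::unique_ptr<int[]> ipiv( new int[4UL] );  // Requires the <memory> header

getrf( A, ipiv.get() );          // LU decomposition of A
getrs( A, b, 'N', ipiv.get() );  // Substitution step; b is overwritten with the solution x
\endcode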
// // // \n \subsection lapack_lu_substitution LU-based Substitution // // The following functions provide an interface for the LAPACK functions \c sgetrs(), \c dgetrs(), // \c cgetrs(), and \c zgetrs(), which perform the substitution step for a general matrix that has // already been decomposed by an \ref lapack_lu_decomposition : \code namespace blaze { void getrs( char trans, int n, int nrhs, const float* A, int lda, const int* ipiv, float* B, int ldb, int* info ); void getrs( char trans, int n, int nrhs, const double* A, int lda, const int* ipiv, double* B, int ldb, int* info ); void getrs( char trans, int n, int nrhs, const complex<float>* A, int lda, const int* ipiv, complex<float>* B, int ldb, int* info ); void getrs( char trans, int n, int nrhs, const complex<double>* A, int lda, const int* ipiv, complex<double>* B, int ldb, int* info ); template< typename MT, bool SO, typename VT, bool TF > void getrs( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char trans, const int* ipiv ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void getrs( const DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char trans, const int* ipiv ); } // namespace blaze \endcode // If the function exits successfully, the vector \a b or the matrix \a B contain the solution(s) // of the linear system of equations. The function fails if ... // // - ... the given system matrix is not a square matrix; // - ... the given \a trans argument is neither 'N' nor 'T' nor 'C'; // - ... the sizes of the two given matrices do not match. // // The first four functions report failure via the \c info argument, the last two functions throw // a \a std::invalid_argument exception in case of an error. // // // \n \subsection lapack_ldlt_substitution LDLT-based Substitution // // The following functions provide an interface for the LAPACK functions \c ssytrs(), \c dsytrs(), // \c csytrs(), and \c zsytrs(), which perform the substitution step for a symmetric indefinite // matrix that has already been decomposed by an \ref lapack_ldlt_decomposition : \code namespace blaze { void sytrs( char uplo, int n, int nrhs, const float* A, int lda, const int* ipiv, float* B, int ldb, int* info ); void sytrs( char uplo, int n, int nrhs, const double* A, int lda, const int* ipiv, double* B, int ldb, int* info ); void sytrs( char uplo, int n, int nrhs, const complex<float>* A, int lda, const int* ipiv, complex<float>* B, int ldb, int* info ); void sytrs( char uplo, int n, int nrhs, const complex<double>* A, int lda, const int* ipiv, complex<double>* B, int ldb, int* info ); template< typename MT, bool SO, typename VT, bool TF > void sytrs( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, const int* ipiv ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void sytrs( const DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo, const int* ipiv ); } // namespace blaze \endcode // If the function exits successfully, the vector \a b or the matrix \a B contain the solution(s) // of the linear system of equations. The function fails if ... // // - ... the given system matrix is not a square matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - ... the sizes of the two given matrices do not match. // // The first four functions report failure via the \c info argument, the last two functions throw // a \a std::invalid_argument exception in case of an error.
// // // \n \subsection lapack_ldlh_substitution LDLH-based Substitution // // The following functions provide an interface for the LAPACK functions \c chetrs() and \c zhetrs(), // which perform the substitution step for an Hermitian indefinite matrix that has already been // decomposed by an \ref lapack_ldlh_decomposition : \code namespace blaze { void hetrs( char uplo, int n, int nrhs, const complex<float>* A, int lda, const int* ipiv, complex<float>* B, int ldb, int* info ); void hetrs( char uplo, int n, int nrhs, const complex<double>* A, int lda, const int* ipiv, complex<double>* B, int ldb, int* info ); template< typename MT, bool SO, typename VT, bool TF > void hetrs( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, const int* ipiv ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void hetrs( const DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo, const int* ipiv ); } // namespace blaze \endcode // If the function exits successfully, the vector \a b or the matrix \a B contain the solution(s) // of the linear system of equations. The function fails if ... // // - ... the given system matrix is not a square matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - ... the sizes of the two given matrices do not match. // // The first two functions report failure via the \c info argument, the last two functions throw // a \a std::invalid_argument exception in case of an error. // // // \n \subsection lapack_llh_substitution Cholesky-based Substitution // // The following functions provide an interface for the LAPACK functions \c spotrs(), \c dpotrs(), // \c cpotrs(), and \c zpotrs(), which perform the substitution step for a positive definite matrix // that has already been decomposed by an \ref lapack_llh_decomposition : \code namespace blaze { void potrs( char uplo, int n, int nrhs, const float* A, int lda, float* B, int ldb, int* info ); void potrs( char uplo, int n, int nrhs, const double* A, int lda, double* B, int ldb, int* info ); void potrs( char uplo, int n, int nrhs, const complex<float>* A, int lda, complex<float>* B, int ldb, int* info ); void potrs( char uplo, int n, int nrhs, const complex<double>* A, int lda, complex<double>* B, int ldb, int* info ); template< typename MT, bool SO, typename VT, bool TF > void potrs( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void potrs( const DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo ); } // namespace blaze \endcode // If the function exits successfully, the vector \a b or the matrix \a B contain the solution(s) // of the linear system of equations. The function fails if ... // // - ... the given system matrix is not a square matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - ... the sizes of the two given matrices do not match. // // The first four functions report failure via the \c info argument, the last two functions throw // a \a std::invalid_argument exception in case of an error.
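//
// As a usage sketch, the two-step solution of a positive definite system via potrf() and potrs()
// might look as follows (assuming a LAPACK library is linked):

\code
using blaze::DynamicMatrix;
using blaze::DynamicVector;
using blaze::columnMajor;

DynamicMatrix<double,columnMajor> A( 8UL, 8UL );
DynamicVector<double> b( 8UL );
// ... Initialization of the positive definite matrix A and the right-hand side b

potrf( A, 'L' );     // Cholesky (LLH) decomposition of A
potrs( A, b, 'L' );  // Substitution step; b is overwritten with the solution
\endcode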
// // // \n \subsection lapack_triangular_substitution Substitution for Triangular Matrices // // The following functions provide an interface for the LAPACK functions \c strtrs(), \c dtrtrs(), // \c ctrtrs(), and \c ztrtrs(), which perform the substitution step for a triangular matrix: \code namespace blaze { void trtrs( char uplo, char trans, char diag, int n, int nrhs, const float* A, int lda, float* B, int ldb, int* info ); void trtrs( char uplo, char trans, char diag, int n, int nrhs, const double* A, int lda, double* B, int ldb, int* info ); void trtrs( char uplo, char trans, char diag, int n, int nrhs, const complex<float>* A, int lda, complex<float>* B, int ldb, int* info ); void trtrs( char uplo, char trans, char diag, int n, int nrhs, const complex<double>* A, int lda, complex<double>* B, int ldb, int* info ); template< typename MT, bool SO, typename VT, bool TF > void trtrs( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, char trans, char diag ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void trtrs( const DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo, char trans, char diag ); } // namespace blaze \endcode // If the function exits successfully, the vector \a b or the matrix \a B contain the solution(s) // of the linear system of equations. The function fails if ... // // - ... the given system matrix is not a square matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - ... the given \a trans argument is neither 'N' nor 'T' nor 'C'; // - ... the given \a diag argument is neither 'U' nor 'N'; // - ... the sizes of the two given matrices do not match. // // The first four functions report failure via the \c info argument, the last two functions throw // a \a std::invalid_argument exception in case of an error. // // // \n \section lapack_linear_system_solver Linear System Solver // <hr> // // The following functions represent compound functions that perform both the decomposition step // as well as the substitution step to compute the solution to a system of linear equations. Note // that depending on the storage order of the system matrix and the given right-hand side the // functions solve different equation systems: // // Single right-hand side: // - \f$ A *x=b \f$ if \a A is column-major // - \f$ A^T*x=b \f$ if \a A is row-major // // Multiple right-hand sides: // - \f$ A *X =B \f$ if both \a A and \a B are column-major // - \f$ A^T*X =B \f$ if \a A is row-major and \a B is column-major // - \f$ A *X^T=B^T \f$ if \a A is column-major and \a B is row-major // - \f$ A^T*X^T=B^T \f$ if both \a A and \a B are row-major // // In this context the general system matrix \a A is an n-by-n matrix that is decomposed by the // respective compound function itself, \a x and \a b are n-dimensional vectors // and \a X and \a B are either row-major m-by-n matrices or column-major n-by-m matrices.
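//
// For instance, the gesv() wrappers described in the next subsection solve a general system of
// equations in a single call (a minimal sketch; the pivot array requires the <memory> header):

\code
using blaze::DynamicMatrix;
using blaze::DynamicVector;
using blaze::columnMajor;

DynamicMatrix<double,columnMajor> A( 6UL, 6UL );
DynamicVector<double> b( 6UL );
// ... Initialization of A and b

const std::unique_ptr<int[]> ipiv( new int[6UL] );

gesv( A, b, ipiv.get() );  // Decomposition and substitution in one step; b receives the solution
\endcode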
// // // \subsection lapack_lu_linear_system_solver LU-based Linear System Solver // // The following functions provide an interface for the LAPACK functions \c sgesv(), \c dgesv(), // \c cgesv(), and \c zgesv(), which combine an \ref lapack_lu_decomposition and the according // \ref lapack_lu_substitution : \code namespace blaze { void gesv( int n, int nrhs, float* A, int lda, int* ipiv, float* B, int ldb, int* info ); void gesv( int n, int nrhs, double* A, int lda, int* ipiv, double* B, int ldb, int* info ); void gesv( int n, int nrhs, complex<float>* A, int lda, int* ipiv, complex<float>* B, int ldb, int* info ); void gesv( int n, int nrhs, complex<double>* A, int lda, int* ipiv, complex<double>* B, int ldb, int* info ); template< typename MT, bool SO, typename VT, bool TF > void gesv( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, int* ipiv ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void gesv( DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, int* ipiv ); } // namespace blaze \endcode // If the function exits successfully, the vector \a b or the matrix \a B contain the // solution(s) of the linear system of equations and \a A has been decomposed by means of an // \ref lapack_lu_decomposition. // // The functions fail if ... // // - ... the given system matrix is not a square matrix; // - ... the given system matrix is singular and not invertible. // // The first four functions report failure via the \c info argument, the last two functions throw // a \a std::invalid_argument exception in case of an error. // // // \n \subsection lapack_ldlt_linear_system_solver LDLT-based Linear System Solver // // The following functions provide an interface for the LAPACK functions \c ssysv(), \c dsysv(), // \c csysv(), and \c zsysv(), which combine an \ref lapack_ldlt_decomposition and the according // \ref lapack_ldlt_substitution : \code namespace blaze { void sysv( char uplo, int n, int nrhs, float* A, int lda, int* ipiv, float* B, int ldb, float* work, int lwork, int* info ); void sysv( char uplo, int n, int nrhs, double* A, int lda, int* ipiv, double* B, int ldb, double* work, int lwork, int* info ); void sysv( char uplo, int n, int nrhs, complex<float>* A, int lda, int* ipiv, complex<float>* B, int ldb, complex<float>* work, int lwork, int* info ); void sysv( char uplo, int n, int nrhs, complex<double>* A, int lda, int* ipiv, complex<double>* B, int ldb, complex<double>* work, int lwork, int* info ); template< typename MT, bool SO, typename VT, bool TF > void sysv( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, int* ipiv ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void sysv( DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo, int* ipiv ); } // namespace blaze \endcode // If the function exits successfully, the vector \a b or the matrix \a B contain the // solution(s) of the linear system of equations and \a A has been decomposed by means of an // \ref lapack_ldlt_decomposition. // // The functions fail if ... // // - ... the given system matrix is not a square matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - ... the sizes of the two given matrices do not match; // - ... the given system matrix is singular and not invertible. // // The first four functions report failure via the \c info argument, the last two functions throw // a \a std::invalid_argument exception in case of an error.
// // // \n \subsection lapack_ldlh_linear_system_solver LDLH-based Linear System Solver // // The following functions provide an interface for the LAPACK functions \c chesv() and \c zhesv(), // which combine an \ref lapack_ldlh_decomposition and the according // \ref lapack_ldlh_substitution : \code namespace blaze { void hesv( char uplo, int n, int nrhs, complex<float>* A, int lda, int* ipiv, complex<float>* B, int ldb, complex<float>* work, int lwork, int* info ); void hesv( char uplo, int n, int nrhs, complex<double>* A, int lda, int* ipiv, complex<double>* B, int ldb, complex<double>* work, int lwork, int* info ); template< typename MT, bool SO, typename VT, bool TF > void hesv( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, int* ipiv ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void hesv( DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo, int* ipiv ); } // namespace blaze \endcode // If the function exits successfully, the vector \a b or the matrix \a B contain the // solution(s) of the linear system of equations and \a A has been decomposed by means of an // \ref lapack_ldlh_decomposition. // // The functions fail if ... // // - ... the given system matrix is not a square matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - ... the sizes of the two given matrices do not match; // - ... the given system matrix is singular and not invertible. // // The first two functions report failure via the \c info argument, the last two functions throw // a \a std::invalid_argument exception in case of an error. // // // \n \subsection lapack_llh_linear_system_solver Cholesky-based Linear System Solver // // The following functions provide an interface for the LAPACK functions \c sposv(), \c dposv(), // \c cposv(), and \c zposv(), which combine an \ref lapack_llh_decomposition and the according // \ref lapack_llh_substitution : \code namespace blaze { void posv( char uplo, int n, int nrhs, float* A, int lda, float* B, int ldb, int* info ); void posv( char uplo, int n, int nrhs, double* A, int lda, double* B, int ldb, int* info ); void posv( char uplo, int n, int nrhs, complex<float>* A, int lda, complex<float>* B, int ldb, int* info ); void posv( char uplo, int n, int nrhs, complex<double>* A, int lda, complex<double>* B, int ldb, int* info ); template< typename MT, bool SO, typename VT, bool TF > void posv( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void posv( DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo ); } // namespace blaze \endcode // If the function exits successfully, the vector \a b or the matrix \a B contain the // solution(s) of the linear system of equations and \a A has been decomposed by means of an // \ref lapack_llh_decomposition. // // The functions fail if ... // // - ... the given system matrix is not a square matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - ... the sizes of the two given matrices do not match; // - ... the given system matrix is singular and not invertible. // // The first four functions report failure via the \c info argument, the last two functions throw // a \a std::invalid_argument exception in case of an error.
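//
// A minimal usage sketch for a positive definite system (assuming a LAPACK library is linked):

\code
using blaze::DynamicMatrix;
using blaze::DynamicVector;
using blaze::columnMajor;

DynamicMatrix<double,columnMajor> A( 5UL, 5UL );
DynamicVector<double> b( 5UL );
// ... Initialization of the positive definite matrix A and the right-hand side b

posv( A, b, 'L' );  // Solves A*x=b; b receives the solution, A its Cholesky decomposition
\endcode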
// // // \n \subsection lapack_triangular_linear_system_solver Linear System Solver for Triangular Matrices // // The following functions provide an interface for the LAPACK functions \c strsv(), \c dtrsv(), // \c ctrsv(), and \c ztrsv(): \code namespace blaze { void trsv( char uplo, char trans, char diag, int n, const float* A, int lda, float* x, int incX ); void trsv( char uplo, char trans, char diag, int n, const double* A, int lda, double* x, int incX ); void trsv( char uplo, char trans, char diag, int n, const complex<float>* A, int lda, complex<float>* x, int incX ); void trsv( char uplo, char trans, char diag, int n, const complex<double>* A, int lda, complex<double>* x, int incX ); template< typename MT, bool SO, typename VT, bool TF > void trsv( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, char trans, char diag ); } // namespace blaze \endcode // If the function exits successfully, the vector \a b contains the solution of the linear system // of equations. // // The functions fail if ... // // - ... the given system matrix is not a square matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - ... the given \a trans argument is neither 'N' nor 'T' nor 'C'; // - ... the given \a diag argument is neither 'U' nor 'N'. // // The last function throws a \a std::invalid_argument exception in case of an error. Note that // none of the functions performs any test for singularity or near-singularity. Such tests // must be performed prior to calling this function! // // // \n Previous: \ref blas_functions &nbsp; &nbsp; Next: \ref configuration_files \n */ //************************************************************************************************* //**Configuration Files**************************************************************************** /*!\page configuration_files Configuration Files // // \tableofcontents // // // Sometimes it might be necessary to adapt \b Blaze to specific requirements. For this purpose // \b Blaze provides several configuration files in the <tt>./blaze/config/</tt> subdirectory, // which provide ample opportunity to customize internal settings, behavior, and thresholds. // This chapter explains the most important of these configuration files. // // // \n \section transpose_flag Default Vector Storage // <hr> // // The \b Blaze default is that all vectors are created as column vectors (if not specified // explicitly): \code blaze::StaticVector<double,3UL> x; // Creates a 3-dimensional static column vector \endcode // The header file <tt>./blaze/config/TransposeFlag.h</tt> allows the configuration of the default // vector storage (i.e. the default transpose flag of the vectors). Via the \c defaultTransposeFlag // value the default transpose flag for all vectors of the \b Blaze library can be specified: \code constexpr bool defaultTransposeFlag = columnVector; \endcode // Valid settings for the \c defaultTransposeFlag are blaze::rowVector and blaze::columnVector. // // // \n \section storage_order Default Matrix Storage // <hr> // // Matrices are by default created as row-major matrices: \code blaze::StaticMatrix<double,3UL,3UL> A; // Creates a 3x3 row-major matrix \endcode // The header file <tt>./blaze/config/StorageOrder.h</tt> allows the configuration of the default // matrix storage order. Via the \c defaultStorageOrder value the default storage order for all // matrices of the \b Blaze library can be specified.
\code constexpr bool defaultStorageOrder = rowMajor; \endcode // Valid settings for the \c defaultStorageOrder are blaze::rowMajor and blaze::columnMajor. // // // \n \section blas_mode BLAS Mode // <hr> // // In order to achieve maximum performance for multiplications with dense matrices, \b Blaze can // be configured to use a BLAS library. Via the following compilation switch in the configuration // file <tt>./blaze/config/BLAS.h</tt> BLAS can be enabled: \code #define BLAZE_BLAS_MODE 1 \endcode // In case the selected BLAS library provides parallel execution, the \c BLAZE_BLAS_IS_PARALLEL // switch should be activated to prevent \b Blaze from parallelizing on its own: \code #define BLAZE_BLAS_IS_PARALLEL 1 \endcode // In case no BLAS library is available, \b Blaze will still work and will not be reduced in // functionality, but performance may be limited. // // // \n \section cache_size Cache Size // <hr> // // The optimization of several \b Blaze compute kernels depends on the cache size of the target // architecture. By default, \b Blaze assumes a cache size of 3 MiByte. However, for optimal // speed the exact cache size of the system should be provided via the \c cacheSize value in the // <tt>./blaze/config/CacheSize.h</tt> configuration file: \code constexpr size_t cacheSize = 3145728UL; \endcode // \n \section vectorization Vectorization // <hr> // // In order to achieve maximum performance and to exploit the compute power of a target platform // the \b Blaze library attempts to vectorize all linear algebra operations by SSE, AVX, and/or // MIC intrinsics, depending on which instruction set is available. However, it is possible to // disable the vectorization entirely by the compile time switch in the configuration file // <tt>./blaze/config/Vectorization.h</tt>: \code #define BLAZE_USE_VECTORIZATION 1 \endcode // In case the switch is set to 1, vectorization is enabled and the \b Blaze library is allowed // to use intrinsics to speed up computations. In case the switch is set to 0, vectorization is // disabled entirely and the \b Blaze library chooses default, non-vectorized functionality for // the operations. Note that deactivating the vectorization may pose a severe performance // limitation for a large number of operations! // // // \n \section thresholds Thresholds // <hr> // // For many computations \b Blaze distinguishes between small and large vectors and matrices. // This separation is especially important for the parallel execution of computations, since // the use of several threads only pays off for sufficiently large vectors and matrices. // Additionally, it also enables \b Blaze to select kernels that are optimized for a specific // size. // // In order to distinguish between small and large data structures \b Blaze provides several // thresholds that can be adapted to the characteristics of the target platform. For instance, // the \c DMATDVECMULT_THRESHOLD specifies the threshold between the application of the custom // \b Blaze kernels for small dense matrix/dense vector multiplications and the BLAS kernels // for large multiplications. All thresholds, including the thresholds for the OpenMP- and // thread-based parallelization, are contained within the configuration file // <tt>./blaze/config/Thresholds.h</tt>. // // // \n \section padding Padding // <hr> // // By default the \b Blaze library uses padding for all dense vectors and matrices in order to // achieve maximum performance in all operations. 
Due to padding, the proper alignment of data // elements can be guaranteed and the need for remainder loops is minimized. However, on the // downside padding introduces an additional memory overhead, which can be large depending on // the used data type. // // The configuration file <tt>./blaze/config/Optimizations.h</tt> provides a compile time switch // that can be used to (de-)activate padding: \code constexpr bool usePadding = true; \endcode // If \c usePadding is set to \c true padding is enabled for all dense vectors and matrices, if // it is set to \c false padding is disabled. Note however that disabling padding can considerably // reduce the performance of all dense vector and matrix operations! // // // \n \section streaming Streaming (Non-Temporal Stores) // <hr> // // For vectors and matrices that don't fit into the cache anymore non-temporal stores can provide // a significant performance advantage of about 20%. However, this advantage is only in effect in // case the memory bandwidth of the target architecture is maxed out. If the target architecture's // memory bandwidth cannot be exhausted the use of non-temporal stores can decrease performance // instead of increasing it. // // The configuration file <tt>./blaze/config/Optimizations.h</tt> provides a compile time switch // that can be used to (de-)activate streaming: \code constexpr bool useStreaming = true; \endcode // If \c useStreaming is set to \c true streaming is enabled, if it is set to \c false streaming // is disabled. It is recommended to consult the target architecture's white papers to decide // whether streaming is beneficial or hurtful for performance. // // // \n Previous: \ref lapack_functions &nbsp; &nbsp; Next: \ref block_vectors_and_matrices \n */ //************************************************************************************************* //**Block Vectors and Matrices********************************************************************* /*!\page block_vectors_and_matrices Block Vectors and Matrices // // \tableofcontents // // // \n \section block_vectors_and_matrices_general General Concepts // <hr> // // In addition to fundamental element types, the \b Blaze library supports vectors and matrices // with non-fundamental element type. For instance, it is possible to define block matrices by // using a matrix type as the element type: \code using blaze::DynamicMatrix; using blaze::DynamicVector; using blaze::rowMajor; using blaze::columnVector; DynamicMatrix< DynamicMatrix<double,rowMajor>, rowMajor > A; DynamicVector< DynamicVector<double,columnVector >, columnVector > x, y; // ... Resizing and initialization y = A * x; \endcode // The matrix/vector multiplication in this example runs fully parallel and uses vectorization // for every inner matrix/vector multiplication and vector addition. // // // \n \section block_vectors_and_matrices_pitfalls Pitfalls // <hr> // // The only thing to keep in mind when using non-fundamental element types is that all operations // between the elements have to be well defined. More specifically, the size of vector and matrix // elements has to match. 
The attempt to combine two non-matching elements results in either a // compilation error (in case of statically sized elements) or an exception (for dynamically sized // elements): \code DynamicVector< StaticVector<int,2UL> > a; DynamicVector< StaticVector<int,3UL> > b; DynamicVector< DynamicVector<int> > c( a + b ); // Compilation error: element size doesn't match \endcode // Therefore please don't forget that dynamically sized elements (e.g. \c blaze::DynamicVector, // \c blaze::HybridVector, \c blaze::DynamicMatrix, \c blaze::HybridMatrix, ...) need to be sized // accordingly upfront. // // // \n \section block_vectors_and_matrices_example Example // <hr> // // The following example demonstrates a complete multiplication between a statically sized block // matrix and block vector: \code // ( ( 1 1 ) ( 2 2 ) ) ( ( 1 ) ) ( ( 10 ) ) // ( ( 1 1 ) ( 2 2 ) ) ( ( 1 ) ) ( ( 10 ) ) // ( ) * ( ) = ( ) // ( ( 3 3 ) ( 4 4 ) ) ( ( 2 ) ) ( ( 22 ) ) // ( ( 3 3 ) ( 4 4 ) ) ( ( 2 ) ) ( ( 22 ) ) typedef StaticMatrix<int,2UL,2UL,rowMajor> M2x2; typedef StaticVector<int,2UL,columnVector> V2; DynamicMatrix<M2x2,rowMajor> A{ { M2x2(1), M2x2(2) }, { M2x2(3), M2x2(4) } }; DynamicVector<V2,columnVector> x{ V2(1), V2(2) }; DynamicVector<V2,columnVector> y( A * x ); \endcode // \n Previous: \ref configuration_files &nbsp; &nbsp; Next: \ref custom_data_types \n */ //************************************************************************************************* //**Custom Data Types****************************************************************************** /*!\page custom_data_types Custom Data Types // // // The \b Blaze library tries hard to make the use of custom data types as convenient, easy and // intuitive as possible. However, unfortunately it is not possible to meet the requirements of // all possible data types. Thus it might be necessary to provide \b Blaze with some additional // information about the data type. The following sections give an overview of the necessary steps // to enable the use of the hypothetical custom data type \c custom::double_t for vector and // matrix operations. For example: \code blaze::DynamicVector<custom::double_t> a, b, c; // ... Resizing and initialization c = a + b; \endcode // The \b Blaze library assumes that the \c custom::double_t data type provides \c operator+() // for additions, \c operator-() for subtractions, \c operator*() for multiplications and // \c operator/() for divisions. If any of these functions is missing it is necessary to implement // the operator to perform the according operation.
For this example we assume that the custom // data type provides the following four functions instead of operators: \code namespace custom { double_t add ( const double_t& a, const double_t& b ); double_t sub ( const double_t& a, const double_t& b ); double_t mult( const double_t& a, const double_t& b ); double_t div ( const double_t& a, const double_t& b ); } // namespace custom \endcode // The following implementations will satisfy the requirements of the \b Blaze library: \code inline custom::double_t operator+( const custom::double_t& a, const custom::double_t& b ) { return add( a, b ); } inline custom::double_t operator-( const custom::double_t& a, const custom::double_t& b ) { return sub( a, b ); } inline custom::double_t operator*( const custom::double_t& a, const custom::double_t& b ) { return mult( a, b ); } inline custom::double_t operator/( const custom::double_t& a, const custom::double_t& b ) { return div( a, b ); } \endcode // \b Blaze will use all the information provided with these functions (for instance the return // type) to properly handle the operations. In the rare case that the return type cannot be // automatically determined from the operator, it might additionally be necessary to provide a // specialization of the following four \b Blaze class templates: \code namespace blaze { template<> struct AddTrait<custom::double_t,custom::double_t> { typedef custom::double_t Type; }; template<> struct SubTrait<custom::double_t,custom::double_t> { typedef custom::double_t Type; }; template<> struct MultTrait<custom::double_t,custom::double_t> { typedef custom::double_t Type; }; template<> struct DivTrait<custom::double_t,custom::double_t> { typedef custom::double_t Type; }; } // namespace blaze \endcode // The same steps are necessary if several custom data types need to be combined (as for instance // \c custom::double_t and \c custom::float_t). Note that in this case both permutations need to // be taken into account: \code custom::double_t operator+( const custom::double_t& a, const custom::float_t& b ); custom::double_t operator+( const custom::float_t& a, const custom::double_t& b ); // ... \endcode // Please note that vectorization applies only to built-in data types, and thus custom data types // cannot achieve maximum performance! // // // \n Previous: \ref block_vectors_and_matrices &nbsp; &nbsp; Next: \ref error_reporting_customization \n */ //************************************************************************************************* //**Customization of the Error Reporting Mechanism************************************************* /*!\page error_reporting_customization Customization of the Error Reporting Mechanism // // \tableofcontents // // // \n \section error_reporting_background Background // <hr> // // The default way for \b Blaze to report errors of any kind is to throw a standard exception. // However, although in general this approach works well, in certain environments and under // special circumstances exceptions may not be the mechanism of choice and a different error // reporting mechanism may be desirable. For this reason, \b Blaze provides several macros, // which enable the customization of the error reporting mechanism. Via these macros it is // possible to replace the standard exceptions with some other exception type or a completely // different approach to report errors.
// // // \n \section error_reporting_general_customization Customization of the Reporting Mechanism // <hr> // // In some cases it might be necessary to adapt the entire error reporting mechanism and to // replace it with some other means to signal failure. The primary macro for this purpose is the // \c BLAZE_THROW macro: \code #define BLAZE_THROW( EXCEPTION ) \ throw EXCEPTION \endcode // This macro represents the default mechanism of the \b Blaze library to report errors of any // kind. In order to customize the error reporting mechanism, all that needs to be done is to // define the macro prior to including any \b Blaze header file. This will cause the \b Blaze // specific mechanism to be overridden. The following example demonstrates this by replacing // exceptions by a call to a \c log() function and a direct call to abort: \code #define BLAZE_THROW( EXCEPTION ) \ log( "..." ); \ abort() #include <blaze/Blaze.h> \endcode // Doing this will trigger a call to \c log() and an abort instead of throwing an exception // whenever an error (such as an invalid argument) is detected. // // \note It is possible to execute several statements instead of executing a single statement to // throw an exception. Also note that it is recommended to define the macro such that a subsequent // semicolon is required! // // \warning This macro is provided with the intention to assist in adapting \b Blaze to special // conditions and environments. However, the customization of the error reporting mechanism via // this macro can have a significant effect on the library. Thus be advised to use the macro // with due care! // // // \n \section error_reporting_exception_customization Customization of the Type of Exceptions // <hr> // // In addition to the customization of the entire error reporting mechanism it is also possible // to customize the type of exceptions being thrown. This can be achieved by customizing any // number of the following macros: \code #define BLAZE_THROW_BAD_ALLOC \ BLAZE_THROW( std::bad_alloc() ) #define BLAZE_THROW_LOGIC_ERROR( MESSAGE ) \ BLAZE_THROW( std::logic_error( MESSAGE ) ) #define BLAZE_THROW_INVALID_ARGUMENT( MESSAGE ) \ BLAZE_THROW( std::invalid_argument( MESSAGE ) ) #define BLAZE_THROW_LENGTH_ERROR( MESSAGE ) \ BLAZE_THROW( std::length_error( MESSAGE ) ) #define BLAZE_THROW_OUT_OF_RANGE( MESSAGE ) \ BLAZE_THROW( std::out_of_range( MESSAGE ) ) #define BLAZE_THROW_RUNTIME_ERROR( MESSAGE ) \ BLAZE_THROW( std::runtime_error( MESSAGE ) ) \endcode // In order to customize the type of exception, the corresponding macro has to be defined prior to // including any \b Blaze header file. This will override the \b Blaze default behavior. The // following example demonstrates this by replacing \c std::invalid_argument by a custom // exception type: \code class InvalidArgument { public: InvalidArgument(); explicit InvalidArgument( const std::string& message ); // ... }; #define BLAZE_THROW_INVALID_ARGUMENT( MESSAGE ) \ BLAZE_THROW( InvalidArgument( MESSAGE ) ) #include <blaze/Blaze.h> \endcode // By manually defining the macro, an \c InvalidArgument exception is thrown instead of a // \c std::invalid_argument exception. Note that it is recommended to define the macro such // that a subsequent semicolon is required! // // \warning These macros are provided with the intention to assist in adapting \b Blaze to // special conditions and environments. However, the customization of the type of an exception // via these macros may have an effect on the library.
Thus be advised to use these macros with due // care! // // // \n \section error_reporting_special_errors Customization of Special Errors // <hr> // // Last but not least it is possible to customize the error reporting for special kinds of errors. // This can be achieved by customizing any number of the following macros: \code #define BLAZE_THROW_DIVISION_BY_ZERO( MESSAGE ) \ BLAZE_THROW_RUNTIME_ERROR( MESSAGE ) #define BLAZE_THROW_LAPACK_ERROR( MESSAGE ) \ BLAZE_THROW_RUNTIME_ERROR( MESSAGE ) \endcode // As explained in the previous sections, in order to customize the handling of special errors // the corresponding macro has to be defined prior to including any \b Blaze header file. This will // override the \b Blaze default behavior. // // // \n Previous: \ref custom_data_types &nbsp; &nbsp; Next: \ref intra_statement_optimization \n */ //************************************************************************************************* //**Intra-Statement Optimization******************************************************************* /*!\page intra_statement_optimization Intra-Statement Optimization // // One of the prime features of the \b Blaze library is the automatic intra-statement optimization. // In order to optimize the overall performance of every single statement \b Blaze attempts to // rearrange the operands based on their types. For instance, the following addition of dense and // sparse vectors \code blaze::DynamicVector<double> d1, d2, d3; blaze::CompressedVector<double> s1; // ... Resizing and initialization d3 = d1 + s1 + d2; \endcode // is automatically rearranged and evaluated as \code // ... d3 = d1 + d2 + s1; // <- Note that s1 and d2 have been rearranged \endcode // This order of operands is highly favorable for the overall performance since the addition of // the two dense vectors \c d1 and \c d2 can be handled much more efficiently in a vectorized // fashion. // // This intra-statement optimization can have a tremendous effect on the performance of a statement. // Consider for instance the following computation: \code blaze::DynamicMatrix<double> A, B; blaze::DynamicVector<double> x, y; // ... Resizing and initialization y = A * B * x; \endcode // Since multiplications are evaluated from left to right, this statement would result in a // matrix/matrix multiplication, followed by a matrix/vector multiplication. However, if the // right subexpression is evaluated first, the performance can be dramatically improved since the // matrix/matrix multiplication can be avoided in favor of a second matrix/vector multiplication. // The \b Blaze library exploits this by automatically restructuring the expression such that the // right multiplication is evaluated first: \code // ... y = A * ( B * x ); \endcode // Note however that although this intra-statement optimization may result in a measurable or // even significant performance improvement, this behavior may be undesirable for several reasons, // for instance because of numerical stability concerns. Therefore, in case the order of evaluation matters, // the best solution is to be explicit and to separate a statement into several statements: \code blaze::DynamicVector<double> d1, d2, d3; blaze::CompressedVector<double> s1; // ... Resizing and initialization d3 = d1 + s1; // Compute the dense vector/sparse vector addition first ... d3 += d2; // ... and afterwards add the second dense vector \endcode \code // ... blaze::DynamicMatrix<double> A, B, C; blaze::DynamicVector<double> x, y; // ...
Resizing and initialization C = A * B; // Compute the left-hand side matrix/matrix multiplication first ... y = C * x; // ... before the right-hand side matrix/vector multiplication \endcode // Alternatively, it is also possible to use the \c eval() function to fix the order of evaluation: \code blaze::DynamicVector<double> d1, d2, d3; blaze::CompressedVector<double> s1; // ... Resizing and initialization d3 = d1 + eval( s1 + d2 ); \endcode \code blaze::DynamicMatrix<double> A, B; blaze::DynamicVector<double> x, y; // ... Resizing and initialization y = eval( A * B ) * x; \endcode // \n Previous: \ref error_reporting_customization */ //************************************************************************************************* #endif
GB_binop__minus_int16.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__minus_int16) // A.*B function (eWiseMult): GB (_AemultB_08__minus_int16) // A.*B function (eWiseMult): GB (_AemultB_02__minus_int16) // A.*B function (eWiseMult): GB (_AemultB_04__minus_int16) // A.*B function (eWiseMult): GB (_AemultB_bitmap__minus_int16) // A*D function (colscale): GB (_AxD__minus_int16) // D*A function (rowscale): GB (_DxB__minus_int16) // C+=B function (dense accum): GB (_Cdense_accumB__minus_int16) // C+=b function (dense accum): GB (_Cdense_accumb__minus_int16) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__minus_int16) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__minus_int16) // C=scalar+B GB (_bind1st__minus_int16) // C=scalar+B' GB (_bind1st_tran__minus_int16) // C=A+scalar GB (_bind2nd__minus_int16) // C=A'+scalar GB (_bind2nd_tran__minus_int16) // C type: int16_t // A type: int16_t // A pattern? 0 // B type: int16_t // B pattern? 0 // BinaryOp: cij = (aij - bij) #define GB_ATYPE \ int16_t #define GB_BTYPE \ int16_t #define GB_CTYPE \ int16_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int16_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int16_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int16_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x - y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MINUS || GxB_NO_INT16 || GxB_NO_MINUS_INT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
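// MINUS is one of these, so the hard-coded kernel below is generated; for
// operators outside this list the generator instead emits a "(none)"
// placeholder wrapped in "#if 0" and the generic code path is used.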
void GB (_Cdense_ewise3_accum__minus_int16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__minus_int16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__minus_int16) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__minus_int16) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int16_t int16_t bwork = (*((int16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__minus_int16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *restrict Cx = (int16_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__minus_int16) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *restrict Cx = (int16_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__minus_int16) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; int16_t alpha_scalar ; int16_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((int16_t *) 
alpha_scalar_in)) ; beta_scalar = (*((int16_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__minus_int16) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__minus_int16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__minus_int16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__minus_int16) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__minus_int16) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *Cx = (int16_t *) Cx_output ; int16_t x = (*((int16_t *) x_input)) ; int16_t *Bx = (int16_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int16_t bij = GBX (Bx, p, false) ; Cx [p] = (x - bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__minus_int16) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int16_t *Cx = (int16_t *) Cx_output ; int16_t *Ax = (int16_t *) Ax_input ; int16_t y = (*((int16_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int16_t aij = GBX (Ax, p, false) ; Cx [p] = (aij - y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x - aij) ; \ } GrB_Info GB (_bind1st_tran__minus_int16) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int 
nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t x = (*((const int16_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij - y) ; \ } GrB_Info GB (_bind2nd_tran__minus_int16) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t y = (*((const int16_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
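// Illustrative usage sketch (an editorial addition, not part of the generated
// file): at the user level these kernels are reached through the polymorphic
// GraphBLAS API with the built-in GrB_MINUS_INT16 operator, e.g.:
//
//      GrB_Matrix A, B, C ;    // GrB_INT16 matrices, already created
//      GrB_eWiseAdd (C, NULL, NULL, GrB_MINUS_INT16, A, B, NULL) ;
//      // pattern = union of A and B; aij-bij where both present
//      GrB_eWiseMult (C, NULL, NULL, GrB_MINUS_INT16, A, B, NULL) ;
//      // pattern = intersection of A and B; aij-bij where both present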
matfilter.c
#include "matrix.h" /** \brief Computes 2-D convolution * * \param[in] A Input matrix * \param[in] mask Input kernel/mask * \param[in] scratch Scratch matrix for temporary calculations * \param[in] result Matrix to store the result * \return Convolved output matrix * */ MATRIX mat_conv2(MATRIX A, MATRIX mask, MATRIX scratch, MATRIX result) { int i, j, k, l, m, n, o, p, ii, jj, mm, nn, flag = 0; m = MatCol(A); n = MatRow(A); o = MatCol(mask); p = MatRow(mask); if((o%2)!=1 ||(p%2)!=1) gen_error(GEN_SIZE_ERROR); ii = (p-1)/2; jj = (o-1)/2; mm = jj+jj+m; nn = ii+ii+n; l = jj+m; k = ii+n; if(scratch==NULL) { if((scratch = mat_creat(nn, mm, UNDEFINED))==NULL) mat_error(MAT_MALLOC); flag = 1; } #pragma omp parallel for private(j) firstprivate(mm, ii, jj, k, l) for(i=0; i<nn; ++i) { for(j=0; j<mm; ++j) { if(i<ii || j<jj || i>=k || j>=l ) scratch[i][j] = 0.0; else scratch[i][j] = A[i-ii][j-jj]; } } if(result==NULL) if((result = mat_creat(n, m, UNDEFINED))==NULL) mat_error(MAT_MALLOC); #pragma omp parallel for private(j) firstprivate(m, ii, jj, k, l) for(i=0; i<n; ++i) { for(j=0; j<m; ++j) { mtype acc = 0.0; for(k = -ii; k<=ii; ++k) { for(l = -jj; l<=jj; ++l) { acc += scratch[i+ii+k][j+jj+l]*mask[ii-k][jj-l]; } } result[i][j] = acc; } } if(flag==1) mat_free(scratch); return result; }
multiple_clients.c
/* * Performs communication with the server using multiple clients. */ #include <stdlib.h> #include <stdio.h> #include <time.h> #include <assert.h> #include <omp.h> #include "cl_rt.h" #include "aira_runtime.h" #include "kernels.h" #define toNS( ts ) ((ts.tv_sec * 1000000000) + ts.tv_nsec) int main(int argc, char** argv) { struct kernel_features feats; struct resource_alloc alloc; struct timespec sleep, rem, start, end; cl_runtime rt; char tmp[8]; unsigned i, j; printf("Please start the server and press [enter] to continue..."); assert(fgets(tmp, sizeof(tmp), stdin)); rt = new_cl_runtime(false); sleep.tv_sec = 0; sleep.tv_nsec = 500000000; aira_conn conn; // Test notification - server should not block client for(i = 0; i < get_num_platforms(); i++) { for(j = 0; j < get_num_devices(i); j++) { printf("Testing notification with 2 threads (%u/%u)...\n", i, j); alloc.platform = i; alloc.device = j; alloc.compute_units = get_num_compute_units(rt, i, j); #pragma omp parallel num_threads(2) private(rem, start, end, conn) { conn = aira_init_conn(); clock_gettime(CLOCK_MONOTONIC, &start); aira_notify(conn, alloc); clock_gettime(CLOCK_MONOTONIC, &end); printf("%d took %lu NS to get allocation\n", omp_get_thread_num(), toNS(end) - toNS(start)); assert(!nanosleep(&sleep, &rem)); aira_kernel_finish(conn); aira_free_conn(conn); } } } // Test resource allocation printf("Testing resource allocation request (2 x BT.A)...\n"); feats.kernel = BT_A; #pragma omp parallel num_threads(2) private(rem, start, end, conn) { conn = aira_init_conn(); clock_gettime(CLOCK_MONOTONIC, &start); alloc = aira_alloc_resources(conn, &feats); clock_gettime(CLOCK_MONOTONIC, &end); printf("%d took %lu NS to get allocation\n", omp_get_thread_num(), toNS(end) - toNS(start)); assert(!nanosleep(&sleep, &rem)); aira_kernel_finish(conn); aira_free_conn(conn); } /* omp_get_num_threads() always returns 1 outside a parallel region; report the default team size with omp_get_max_threads() instead. */ printf("Testing resource allocation request (%d x BT.A)...\n", omp_get_max_threads()); #pragma omp parallel private(rem, start, end, conn) { conn = aira_init_conn(); clock_gettime(CLOCK_MONOTONIC, &start); alloc = aira_alloc_resources(conn, &feats); clock_gettime(CLOCK_MONOTONIC, &end); printf("%d took %lu NS to get allocation\n", omp_get_thread_num(), toNS(end) - toNS(start)); assert(!nanosleep(&sleep, &rem)); aira_kernel_finish(conn); aira_free_conn(conn); } printf("Testing resource allocation request (2 x EP.C)...\n"); feats.kernel = EP_C; #pragma omp parallel num_threads(2) private(rem, start, end, conn) { conn = aira_init_conn(); clock_gettime(CLOCK_MONOTONIC, &start); alloc = aira_alloc_resources(conn, &feats); clock_gettime(CLOCK_MONOTONIC, &end); printf("%d took %lu NS to get allocation\n", omp_get_thread_num(), toNS(end) - toNS(start)); assert(!nanosleep(&sleep, &rem)); aira_kernel_finish(conn); aira_free_conn(conn); } printf("Testing resource allocation request (BT.A + EP.C)...\n"); #pragma omp parallel num_threads(2) private(feats, rem, start, end, conn) { conn = aira_init_conn(); switch(omp_get_thread_num()) { case 0: feats.kernel = BT_A; break; default: feats.kernel = EP_C; break; } clock_gettime(CLOCK_MONOTONIC, &start); alloc = aira_alloc_resources(conn, &feats); clock_gettime(CLOCK_MONOTONIC, &end); printf("%d took %lu NS to get allocation\n", omp_get_thread_num(), toNS(end) - toNS(start)); assert(!nanosleep(&sleep, &rem)); aira_kernel_finish(conn); aira_free_conn(conn); } delete_cl_runtime(rt); printf("Check the server's log for allocations/running kernels\n"); return 0; }
program_evaluator.h
// Ceres Solver - A fast non-linear least squares minimizer // Copyright 2015 Google Inc. All rights reserved. // http://ceres-solver.org/ // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Google Inc. nor the names of its contributors may be // used to endorse or promote products derived from this software without // specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR // CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF // SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE // POSSIBILITY OF SUCH DAMAGE. // // Author: keir@google.com (Keir Mierle) // // The ProgramEvaluator runs the cost functions contained in each residual block // and stores the result into a jacobian. The particular type of jacobian is // abstracted out using two template parameters: // // - An "EvaluatePreparer" that is responsible for creating the array with // pointers to the jacobian blocks where the cost function evaluates to. // - A "JacobianWriter" that is responsible for storing the resulting // jacobian blocks in the passed sparse matrix. // // This abstraction affords an efficient evaluator implementation while still // supporting writing to multiple sparse matrix formats. For example, when the // ProgramEvaluator is parameterized for writing to block sparse matrices, the // residual jacobians are written directly into their final position in the // block sparse matrix by the user's CostFunction; there is no copying. // // The evaluation is threaded with OpenMP or TBB. // // The EvaluatePreparer and JacobianWriter interfaces are as follows: // // class EvaluatePreparer { // // Prepare the jacobians array for use as the destination of a call to // // a cost function's evaluate method. // void Prepare(const ResidualBlock* residual_block, // int residual_block_index, // SparseMatrix* jacobian, // double** jacobians); // } // // class JacobianWriter { // // Create a jacobian that this writer can write. Same as // // Evaluator::CreateJacobian. // SparseMatrix* CreateJacobian() const; // // // Create num_threads evaluate preparers. Caller owns result which must // // be freed with delete[]. Resulting preparers are valid while *this is. // EvaluatePreparer* CreateEvaluatePreparers(int num_threads); // // // Write the block jacobians from a residual block evaluation to the // // larger sparse jacobian. 
// void Write(int residual_id, // int residual_offset, // double** jacobians, // SparseMatrix* jacobian); // } // // Note: The ProgramEvaluator is not thread safe, since internally it maintains // some per-thread scratch space. #ifndef CERES_INTERNAL_PROGRAM_EVALUATOR_H_ #define CERES_INTERNAL_PROGRAM_EVALUATOR_H_ // This include must come before any #ifndef check on Ceres compile options. #include "ceres/internal/port.h" #include <map> #include <memory> #include <string> #include <vector> #include "ceres/evaluation_callback.h" #include "ceres/execution_summary.h" #include "ceres/internal/eigen.h" #include "ceres/parameter_block.h" #include "ceres/program.h" #include "ceres/residual_block.h" #include "ceres/scoped_thread_token.h" #include "ceres/small_blas.h" #include "ceres/thread_token_provider.h" #if defined(CERES_USE_TBB) || defined(CERES_USE_CXX11_THREADS) #include <atomic> #include "ceres/parallel_for.h" #endif namespace ceres { namespace internal { struct NullJacobianFinalizer { void operator()(SparseMatrix* jacobian, int num_parameters) {} }; template<typename EvaluatePreparer, typename JacobianWriter, typename JacobianFinalizer = NullJacobianFinalizer> class ProgramEvaluator : public Evaluator { public: ProgramEvaluator(const Evaluator::Options &options, Program* program) : options_(options), program_(program), jacobian_writer_(options, program), evaluate_preparers_( jacobian_writer_.CreateEvaluatePreparers(options.num_threads)) { #ifdef CERES_NO_THREADS if (options_.num_threads > 1) { LOG(WARNING) << "Neither OpenMP nor TBB support is compiled into this binary; " << "only options.num_threads = 1 is supported. Switching " << "to single threaded mode."; options_.num_threads = 1; } #endif // CERES_NO_THREADS BuildResidualLayout(*program, &residual_layout_); evaluate_scratch_.reset(CreateEvaluatorScratch(*program, options.num_threads)); } // Implementation of Evaluator interface. SparseMatrix* CreateJacobian() const { return jacobian_writer_.CreateJacobian(); } bool Evaluate(const Evaluator::EvaluateOptions& evaluate_options, const double* state, double* cost, double* residuals, double* gradient, SparseMatrix* jacobian) { ScopedExecutionTimer total_timer("Evaluator::Total", &execution_summary_); ScopedExecutionTimer call_type_timer(gradient == NULL && jacobian == NULL ? "Evaluator::Residual" : "Evaluator::Jacobian", &execution_summary_); // The parameters are stateful, so set the state before evaluating. if (!program_->StateVectorToParameterBlocks(state)) { return false; } // Notify the user about a new evaluation point if they are interested. if (options_.evaluation_callback != NULL) { program_->CopyParameterBlockStateToUserState(); options_.evaluation_callback->PrepareForEvaluation( /*jacobians=*/(gradient != NULL || jacobian != NULL), evaluate_options.new_evaluation_point); } if (residuals != NULL) { VectorRef(residuals, program_->NumResiduals()).setZero(); } if (jacobian != NULL) { jacobian->SetZero(); } // Each thread gets its own cost and evaluate scratch space.
for (int i = 0; i < options_.num_threads; ++i) { evaluate_scratch_[i].cost = 0.0; if (gradient != NULL) { VectorRef(evaluate_scratch_[i].gradient.get(), program_->NumEffectiveParameters()).setZero(); } } const int num_residual_blocks = program_->NumResidualBlocks(); #if !(defined(CERES_USE_TBB) || defined(CERES_USE_CXX11_THREADS)) ThreadTokenProvider thread_token_provider(options_.num_threads); #endif // !(defined(CERES_USE_TBB) || defined(CERES_USE_CXX11_THREADS)) #ifdef CERES_USE_OPENMP // This bool is used to disable the loop if an error is encountered // without breaking out of it. The remaining loop iterations are still run, // but with an empty body, and so will finish quickly. bool abort = false; #pragma omp parallel for num_threads(options_.num_threads) for (int i = 0; i < num_residual_blocks; ++i) { // Disable the loop instead of breaking, as required by OpenMP. #pragma omp flush(abort) #endif // CERES_USE_OPENMP #ifdef CERES_NO_THREADS bool abort = false; for (int i = 0; i < num_residual_blocks; ++i) { #endif // CERES_NO_THREADS #if defined(CERES_USE_TBB) || defined(CERES_USE_CXX11_THREADS) std::atomic_bool abort(false); ParallelFor(options_.context, 0, num_residual_blocks, options_.num_threads, [&](int thread_id, int i) { #endif // defined(CERES_USE_TBB) || defined(CERES_USE_CXX11_THREADS) if (abort) { #if defined(CERES_USE_TBB) || defined(CERES_USE_CXX11_THREADS) return; #else continue; #endif // defined(CERES_USE_TBB) || defined(CERES_USE_CXX11_THREADS) } #if !(defined(CERES_USE_TBB) || defined(CERES_USE_CXX11_THREADS)) const ScopedThreadToken scoped_thread_token(&thread_token_provider); const int thread_id = scoped_thread_token.token(); #endif // !(defined(CERES_USE_TBB) || defined(CERES_USE_CXX11_THREADS)) EvaluatePreparer* preparer = &evaluate_preparers_[thread_id]; EvaluateScratch* scratch = &evaluate_scratch_[thread_id]; // Prepare block residuals if requested. const ResidualBlock* residual_block = program_->residual_blocks()[i]; double* block_residuals = NULL; if (residuals != NULL) { block_residuals = residuals + residual_layout_[i]; } else if (gradient != NULL) { block_residuals = scratch->residual_block_residuals.get(); } // Prepare block jacobians if requested. double** block_jacobians = NULL; if (jacobian != NULL || gradient != NULL) { preparer->Prepare(residual_block, i, jacobian, scratch->jacobian_block_ptrs.get()); block_jacobians = scratch->jacobian_block_ptrs.get(); } // Evaluate the cost, residuals, and jacobians. double block_cost; if (!residual_block->Evaluate( evaluate_options.apply_loss_function, &block_cost, block_residuals, block_jacobians, scratch->residual_block_evaluate_scratch.get())) { abort = true; #ifdef CERES_USE_OPENMP // This ensures that the OpenMP threads have a consistent view of 'abort'. Do // the flush inside the failure case so that there is usually only one // synchronization point per loop iteration instead of two. #pragma omp flush(abort) #endif // CERES_USE_OPENMP #if defined(CERES_USE_TBB) || defined(CERES_USE_CXX11_THREADS) return; #else continue; #endif // defined(CERES_USE_TBB) || defined(CERES_USE_CXX11_THREADS) } scratch->cost += block_cost; // Store the jacobians, if they were requested. if (jacobian != NULL) { jacobian_writer_.Write(i, residual_layout_[i], block_jacobians, jacobian); } // Compute and store the gradient, if it was requested. 
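// Each non-constant parameter block contributes J_j^T * r, accumulated into
// this thread's local gradient scratch and summed across threads after the
// loop.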
if (gradient != NULL) { int num_residuals = residual_block->NumResiduals(); int num_parameter_blocks = residual_block->NumParameterBlocks(); for (int j = 0; j < num_parameter_blocks; ++j) { const ParameterBlock* parameter_block = residual_block->parameter_blocks()[j]; if (parameter_block->IsConstant()) { continue; } MatrixTransposeVectorMultiply<Eigen::Dynamic, Eigen::Dynamic, 1>( block_jacobians[j], num_residuals, parameter_block->LocalSize(), block_residuals, scratch->gradient.get() + parameter_block->delta_offset()); } } } #if defined(CERES_USE_TBB) || defined(CERES_USE_CXX11_THREADS) ); #endif // defined(CERES_USE_TBB) || defined(CERES_USE_CXX11_THREADS) if (!abort) { const int num_parameters = program_->NumEffectiveParameters(); // Sum the cost and gradient (if requested) from each thread. (*cost) = 0.0; if (gradient != NULL) { VectorRef(gradient, num_parameters).setZero(); } for (int i = 0; i < options_.num_threads; ++i) { (*cost) += evaluate_scratch_[i].cost; if (gradient != NULL) { VectorRef(gradient, num_parameters) += VectorRef(evaluate_scratch_[i].gradient.get(), num_parameters); } } // Finalize the Jacobian if it is available. // `num_parameters` is passed to the finalizer so that additional // storage can be reserved for additional diagonal elements if // necessary. if (jacobian != NULL) { JacobianFinalizer f; f(jacobian, num_parameters); } } return !abort; } bool Plus(const double* state, const double* delta, double* state_plus_delta) const { return program_->Plus(state, delta, state_plus_delta); } int NumParameters() const { return program_->NumParameters(); } int NumEffectiveParameters() const { return program_->NumEffectiveParameters(); } int NumResiduals() const { return program_->NumResiduals(); } virtual std::map<std::string, CallStatistics> Statistics() const { return execution_summary_.statistics(); } private: // Per-thread scratch space needed to evaluate and store each residual block. struct EvaluateScratch { void Init(int max_parameters_per_residual_block, int max_scratch_doubles_needed_for_evaluate, int max_residuals_per_residual_block, int num_parameters) { residual_block_evaluate_scratch.reset( new double[max_scratch_doubles_needed_for_evaluate]); gradient.reset(new double[num_parameters]); VectorRef(gradient.get(), num_parameters).setZero(); residual_block_residuals.reset( new double[max_residuals_per_residual_block]); jacobian_block_ptrs.reset( new double*[max_parameters_per_residual_block]); } double cost; std::unique_ptr<double[]> residual_block_evaluate_scratch; // The gradient in the local parameterization. std::unique_ptr<double[]> gradient; // Enough space to store the residual for the largest residual block. std::unique_ptr<double[]> residual_block_residuals; std::unique_ptr<double*[]> jacobian_block_ptrs; }; static void BuildResidualLayout(const Program& program, std::vector<int>* residual_layout) { const std::vector<ResidualBlock*>& residual_blocks = program.residual_blocks(); residual_layout->resize(program.NumResidualBlocks()); int residual_pos = 0; for (int i = 0; i < residual_blocks.size(); ++i) { const int num_residuals = residual_blocks[i]->NumResiduals(); (*residual_layout)[i] = residual_pos; residual_pos += num_residuals; } } // Create scratch space for each thread evaluating the program. 
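// The scratch arrays are sized to the per-program maxima (parameters,
// residuals, and evaluate-scratch doubles per residual block), so one
// allocation per thread suffices for every residual block it may evaluate.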
static EvaluateScratch* CreateEvaluatorScratch(const Program& program, int num_threads) { int max_parameters_per_residual_block = program.MaxParametersPerResidualBlock(); int max_scratch_doubles_needed_for_evaluate = program.MaxScratchDoublesNeededForEvaluate(); int max_residuals_per_residual_block = program.MaxResidualsPerResidualBlock(); int num_parameters = program.NumEffectiveParameters(); EvaluateScratch* evaluate_scratch = new EvaluateScratch[num_threads]; for (int i = 0; i < num_threads; i++) { evaluate_scratch[i].Init(max_parameters_per_residual_block, max_scratch_doubles_needed_for_evaluate, max_residuals_per_residual_block, num_parameters); } return evaluate_scratch; } Evaluator::Options options_; Program* program_; JacobianWriter jacobian_writer_; std::unique_ptr<EvaluatePreparer[]> evaluate_preparers_; std::unique_ptr<EvaluateScratch[]> evaluate_scratch_; std::vector<int> residual_layout_; ::ceres::internal::ExecutionSummary execution_summary_; }; } // namespace internal } // namespace ceres #endif // CERES_INTERNAL_PROGRAM_EVALUATOR_H_
GB_binop__bclr_int32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__bclr_int32) // A.*B function (eWiseMult): GB (_AemultB_08__bclr_int32) // A.*B function (eWiseMult): GB (_AemultB_02__bclr_int32) // A.*B function (eWiseMult): GB (_AemultB_04__bclr_int32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__bclr_int32) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((none)) // C+=B function (dense accum): GB (_Cdense_accumB__bclr_int32) // C+=b function (dense accum): GB (_Cdense_accumb__bclr_int32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bclr_int32) // C=scalar+B GB (_bind1st__bclr_int32) // C=scalar+B' GB (_bind1st_tran__bclr_int32) // C=A+scalar GB (_bind2nd__bclr_int32) // C=A'+scalar GB (_bind2nd_tran__bclr_int32) // C type: int32_t // A type: int32_t // A pattern? 0 // B type: int32_t // B pattern? 0 // BinaryOp: cij = GB_BITCLR (aij, bij, int32_t, 32) #define GB_ATYPE \ int32_t #define GB_BTYPE \ int32_t #define GB_CTYPE \ int32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int32_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int32_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GB_BITCLR (x, y, int32_t, 32) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 1 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_BCLR || GxB_NO_INT32 || GxB_NO_BCLR_INT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
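// BITCLR is not in that list, so this dense ewise3-accumulation kernel is
// stubbed out: the generator emits the "(none)" placeholder below and wraps
// it in "#if 0" so that the generic code path is used instead.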
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__bclr_int32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__bclr_int32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__bclr_int32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int32_t int32_t bwork = (*((int32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *restrict Cx = (int32_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *restrict Cx = (int32_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__bclr_int32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; int32_t alpha_scalar ; int32_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((int32_t *) alpha_scalar_in)) ; 
beta_scalar = (*((int32_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__bclr_int32) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__bclr_int32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__bclr_int32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__bclr_int32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__bclr_int32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *Cx = (int32_t *) Cx_output ; int32_t x = (*((int32_t *) x_input)) ; int32_t *Bx = (int32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int32_t bij = GBX (Bx, p, false) ; Cx [p] = GB_BITCLR (x, bij, int32_t, 32) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__bclr_int32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int32_t *Cx = (int32_t *) Cx_output ; int32_t *Ax = (int32_t *) Ax_input ; int32_t y = (*((int32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int32_t aij = GBX (Ax, p, false) ; Cx [p] = GB_BITCLR (aij, y, int32_t, 32) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_BITCLR (x, aij, int32_t, 32) ; \ } GrB_Info GB (_bind1st_tran__bclr_int32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict 
*Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t x = (*((const int32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_BITCLR (aij, y, int32_t, 32) ; \ } GrB_Info GB (_bind2nd_tran__bclr_int32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t y = (*((const int32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
program_evaluator.h
// Ceres Solver - A fast non-linear least squares minimizer // Copyright 2010, 2011, 2012 Google Inc. All rights reserved. // http://code.google.com/p/ceres-solver/ // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Google Inc. nor the names of its contributors may be // used to endorse or promote products derived from this software without // specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR // CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF // SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE // POSSIBILITY OF SUCH DAMAGE. // // Author: keir@google.com (Keir Mierle) // // The ProgramEvaluator runs the cost functions contained in each residual block // and stores the result into a jacobian. The particular type of jacobian is // abstracted out using two template parameters: // // - An "EvaluatePreparer" that is responsible for creating the array with // pointers to the jacobian blocks where the cost function evaluates to. // - A "JacobianWriter" that is responsible for storing the resulting // jacobian blocks in the passed sparse matrix. // // This abstraction affords an efficient evaluator implementation while still // supporting writing to multiple sparse matrix formats. For example, when the // ProgramEvaluator is parameterized for writing to block sparse matrices, the // residual jacobians are written directly into their final position in the // block sparse matrix by the user's CostFunction; there is no copying. // // The evaluation is threaded with OpenMP. // // The EvaluatePreparer and JacobianWriter interfaces are as follows: // // class EvaluatePreparer { // // Prepare the jacobians array for use as the destination of a call to // // a cost function's evaluate method. // void Prepare(const ResidualBlock* residual_block, // int residual_block_index, // SparseMatrix* jacobian, // double** jacobians); // } // // class JacobianWriter { // // Create a jacobian that this writer can write. Same as // // Evaluator::CreateJacobian. // SparseMatrix* CreateJacobian() const; // // // Create num_threads evaluate preparers. Caller owns result which must // // be freed with delete[]. Resulting preparers are valid while *this is. // EvaluatePreparer* CreateEvaluatePreparers(int num_threads); // // // Write the block jacobians from a residual block evaluation to the // // larger sparse jacobian. 
//   void Write(int residual_id,
//              int residual_offset,
//              double** jacobians,
//              SparseMatrix* jacobian);
// }
//
// Note: The ProgramEvaluator is not thread-safe, since internally it maintains
// some per-thread scratch space.

#ifndef CERES_INTERNAL_PROGRAM_EVALUATOR_H_
#define CERES_INTERNAL_PROGRAM_EVALUATOR_H_

// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"

#ifdef CERES_USE_OPENMP
#include <omp.h>
#endif

#include <map>
#include <string>
#include <vector>
#include "ceres/execution_summary.h"
#include "ceres/internal/eigen.h"
#include "ceres/internal/scoped_ptr.h"
#include "ceres/parameter_block.h"
#include "ceres/program.h"
#include "ceres/residual_block.h"
#include "ceres/small_blas.h"

namespace ceres {
namespace internal {

struct NullJacobianFinalizer {
  void operator()(SparseMatrix* jacobian, int num_parameters) {}
};

template<typename EvaluatePreparer,
         typename JacobianWriter,
         typename JacobianFinalizer = NullJacobianFinalizer>
class ProgramEvaluator : public Evaluator {
 public:
  ProgramEvaluator(const Evaluator::Options &options, Program* program)
      : options_(options),
        program_(program),
        jacobian_writer_(options, program),
        evaluate_preparers_(
            jacobian_writer_.CreateEvaluatePreparers(options.num_threads)) {
#ifndef CERES_USE_OPENMP
    CHECK_EQ(1, options_.num_threads)
        << "OpenMP support is not compiled into this binary; "
        << "only options.num_threads=1 is supported.";
#endif

    BuildResidualLayout(*program, &residual_layout_);
    evaluate_scratch_.reset(CreateEvaluatorScratch(*program,
                                                   options.num_threads));
  }

  // Implementation of Evaluator interface.
  SparseMatrix* CreateJacobian() const {
    return jacobian_writer_.CreateJacobian();
  }

  bool Evaluate(const Evaluator::EvaluateOptions& evaluate_options,
                const double* state,
                double* cost,
                double* residuals,
                double* gradient,
                SparseMatrix* jacobian) {
    ScopedExecutionTimer total_timer("Evaluator::Total", &execution_summary_);
    ScopedExecutionTimer call_type_timer(gradient == NULL && jacobian == NULL
                                         ? "Evaluator::Residual"
                                         : "Evaluator::Jacobian",
                                         &execution_summary_);

    // The parameters are stateful, so set the state before evaluating.
    if (!program_->StateVectorToParameterBlocks(state)) {
      return false;
    }

    if (residuals != NULL) {
      VectorRef(residuals, program_->NumResiduals()).setZero();
    }

    if (jacobian != NULL) {
      jacobian->SetZero();
    }

    // Each thread gets its own cost and evaluate scratch space.
    for (int i = 0; i < options_.num_threads; ++i) {
      evaluate_scratch_[i].cost = 0.0;
      if (gradient != NULL) {
        VectorRef(evaluate_scratch_[i].gradient.get(),
                  program_->NumEffectiveParameters()).setZero();
      }
    }

    // This bool is used to disable the loop if an error is encountered
    // without breaking out of it. The remaining loop iterations are still run,
    // but with an empty body, and so will finish quickly.
    bool abort = false;
    int num_residual_blocks = program_->NumResidualBlocks();
#pragma omp parallel for num_threads(options_.num_threads)
    for (int i = 0; i < num_residual_blocks; ++i) {
      // Disable the loop instead of breaking, as required by OpenMP.
#pragma omp flush(abort)
      if (abort) {
        continue;
      }

#ifdef CERES_USE_OPENMP
      int thread_id = omp_get_thread_num();
#else
      int thread_id = 0;
#endif
      EvaluatePreparer* preparer = &evaluate_preparers_[thread_id];
      EvaluateScratch* scratch = &evaluate_scratch_[thread_id];

      // Prepare block residuals if requested.
const ResidualBlock* residual_block = program_->residual_blocks()[i]; double* block_residuals = NULL; if (residuals != NULL) { block_residuals = residuals + residual_layout_[i]; } else if (gradient != NULL) { block_residuals = scratch->residual_block_residuals.get(); } // Prepare block jacobians if requested. double** block_jacobians = NULL; if (jacobian != NULL || gradient != NULL) { preparer->Prepare(residual_block, i, jacobian, scratch->jacobian_block_ptrs.get()); block_jacobians = scratch->jacobian_block_ptrs.get(); } // Evaluate the cost, residuals, and jacobians. double block_cost; if (!residual_block->Evaluate( evaluate_options.apply_loss_function, &block_cost, block_residuals, block_jacobians, scratch->residual_block_evaluate_scratch.get())) { abort = true; // This ensures that the OpenMP threads have a consistent view of 'abort'. Do // the flush inside the failure case so that there is usually only one // synchronization point per loop iteration instead of two. #pragma omp flush(abort) continue; } scratch->cost += block_cost; // Store the jacobians, if they were requested. if (jacobian != NULL) { jacobian_writer_.Write(i, residual_layout_[i], block_jacobians, jacobian); } // Compute and store the gradient, if it was requested. if (gradient != NULL) { int num_residuals = residual_block->NumResiduals(); int num_parameter_blocks = residual_block->NumParameterBlocks(); for (int j = 0; j < num_parameter_blocks; ++j) { const ParameterBlock* parameter_block = residual_block->parameter_blocks()[j]; if (parameter_block->IsConstant()) { continue; } MatrixTransposeVectorMultiply<Eigen::Dynamic, Eigen::Dynamic, 1>( block_jacobians[j], num_residuals, parameter_block->LocalSize(), block_residuals, scratch->gradient.get() + parameter_block->delta_offset()); } } } if (!abort) { const int num_parameters = program_->NumEffectiveParameters(); // Sum the cost and gradient (if requested) from each thread. (*cost) = 0.0; if (gradient != NULL) { VectorRef(gradient, num_parameters).setZero(); } for (int i = 0; i < options_.num_threads; ++i) { (*cost) += evaluate_scratch_[i].cost; if (gradient != NULL) { VectorRef(gradient, num_parameters) += VectorRef(evaluate_scratch_[i].gradient.get(), num_parameters); } } // Finalize the Jacobian if it is available. // `num_parameters` is passed to the finalizer so that additional // storage can be reserved for additional diagonal elements if // necessary. if (jacobian != NULL) { JacobianFinalizer f; f(jacobian, num_parameters); } } return !abort; } bool Plus(const double* state, const double* delta, double* state_plus_delta) const { return program_->Plus(state, delta, state_plus_delta); } int NumParameters() const { return program_->NumParameters(); } int NumEffectiveParameters() const { return program_->NumEffectiveParameters(); } int NumResiduals() const { return program_->NumResiduals(); } virtual map<string, int> CallStatistics() const { return execution_summary_.calls(); } virtual map<string, double> TimeStatistics() const { return execution_summary_.times(); } private: // Per-thread scratch space needed to evaluate and store each residual block. 
struct EvaluateScratch { void Init(int max_parameters_per_residual_block, int max_scratch_doubles_needed_for_evaluate, int max_residuals_per_residual_block, int num_parameters) { residual_block_evaluate_scratch.reset( new double[max_scratch_doubles_needed_for_evaluate]); gradient.reset(new double[num_parameters]); VectorRef(gradient.get(), num_parameters).setZero(); residual_block_residuals.reset( new double[max_residuals_per_residual_block]); jacobian_block_ptrs.reset( new double*[max_parameters_per_residual_block]); } double cost; scoped_array<double> residual_block_evaluate_scratch; // The gradient in the local parameterization. scoped_array<double> gradient; // Enough space to store the residual for the largest residual block. scoped_array<double> residual_block_residuals; scoped_array<double*> jacobian_block_ptrs; }; static void BuildResidualLayout(const Program& program, vector<int>* residual_layout) { const vector<ResidualBlock*>& residual_blocks = program.residual_blocks(); residual_layout->resize(program.NumResidualBlocks()); int residual_pos = 0; for (int i = 0; i < residual_blocks.size(); ++i) { const int num_residuals = residual_blocks[i]->NumResiduals(); (*residual_layout)[i] = residual_pos; residual_pos += num_residuals; } } // Create scratch space for each thread evaluating the program. static EvaluateScratch* CreateEvaluatorScratch(const Program& program, int num_threads) { int max_parameters_per_residual_block = program.MaxParametersPerResidualBlock(); int max_scratch_doubles_needed_for_evaluate = program.MaxScratchDoublesNeededForEvaluate(); int max_residuals_per_residual_block = program.MaxResidualsPerResidualBlock(); int num_parameters = program.NumEffectiveParameters(); EvaluateScratch* evaluate_scratch = new EvaluateScratch[num_threads]; for (int i = 0; i < num_threads; i++) { evaluate_scratch[i].Init(max_parameters_per_residual_block, max_scratch_doubles_needed_for_evaluate, max_residuals_per_residual_block, num_parameters); } return evaluate_scratch; } Evaluator::Options options_; Program* program_; JacobianWriter jacobian_writer_; scoped_array<EvaluatePreparer> evaluate_preparers_; scoped_array<EvaluateScratch> evaluate_scratch_; vector<int> residual_layout_; ::ceres::internal::ExecutionSummary execution_summary_; }; } // namespace internal } // namespace ceres #endif // CERES_INTERNAL_PROGRAM_EVALUATOR_H_
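The "disable, don't break" idiom in Evaluate() above is worth isolating: OpenMP forbids break inside a parallel for, so a shared flag plus flush turns the remaining iterations into cheap no-ops once any iteration fails. A minimal sketch of the same idiom in C (the process() callback is a hypothetical stand-in for residual block evaluation):

#include <stdbool.h>

/* Returns false if any element fails to process; after a failure the
 * remaining iterations still run, but exit immediately. */
static bool process_all(double *data, int n, bool (*process)(double *))
{
    bool abort = false;
    #pragma omp parallel for
    for (int i = 0; i < n; ++i) {
        #pragma omp flush(abort)          /* read the latest value of the flag */
        if (abort) continue;              /* cannot 'break' an OpenMP loop */
        if (!process(&data[i])) {
            abort = true;
            #pragma omp flush(abort)      /* publish the failure; flushing only
                                             here keeps one sync point per
                                             iteration on the common path */
        }
    }
    return !abort;
}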
mm_p_collapse.c
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <omp.h>

int main(int argc, char* argv[])
{
    printf("entered main function!\n");
    int size = 1024;
    int (*matrix_A)[size] = malloc(sizeof(int[size][size]));
    int (*matrix_B)[size] = malloc(sizeof(int[size][size]));
    int (*result)[size] = malloc(sizeof(int[size][size]));
    printf("const set-up done!\n");

    //Initialize matrix:
    for (int i = 0; i < size; i++) {
        for (int j = 0; j < size; j++) {
            matrix_A[i][j] = rand();
            matrix_B[i][j] = rand();
            result[i][j] = 0;
        }
    }
    printf("matrix initialization done!\n");

    //Matrix multiplication
    double t1 = omp_get_wtime();
    omp_set_num_threads(16);
    // Only the i and j loops may be collapsed: collapsing the k loop as well
    // would distribute the k iterations across threads, letting several
    // threads update the same result[i][j] concurrently -- a data race.
    #pragma omp parallel for collapse(2) shared(matrix_A,matrix_B,result)
    for (int i = 0; i < size; i++) {
        for (int j = 0; j < size; j++) {
            for (int k = 0; k < size; k++) {
                result[i][j] += matrix_A[i][k] * matrix_B[k][j];
            }
        }
    }
    printf("matrix multiplication done!\n");
    double t2 = omp_get_wtime();
    printf("%f\n", t2 - t1);

    free(matrix_A);
    free(matrix_B);
    free(result);
    return 0;
}
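A variant worth knowing when the += update above feels fragile: accumulate into a local scalar so each output element is written exactly once. A sketch under the same array layout as the program above:

/* Thread-private accumulator: each C[i][j] is written once, so the two
 * outer loops collapse safely and the inner loop stays vectorizable. */
static void matmul(int n, int A[n][n], int B[n][n], int C[n][n])
{
    #pragma omp parallel for collapse(2)
    for (int i = 0; i < n; i++) {
        for (int j = 0; j < n; j++) {
            int sum = 0;            /* private to the (i,j) iteration */
            for (int k = 0; k < n; k++)
                sum += A[i][k] * B[k][j];
            C[i][j] = sum;
        }
    }
}

Called as matmul(size, matrix_A, matrix_B, result), it computes the same product with no shared read-modify-write in the inner loop.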
GB_unop__minv_uint32_uint32.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__minv_uint32_uint32) // op(A') function: GB (_unop_tran__minv_uint32_uint32) // C type: uint32_t // A type: uint32_t // cast: uint32_t cij = aij // unaryop: cij = GB_IMINV_UNSIGNED (aij, 32) #define GB_ATYPE \ uint32_t #define GB_CTYPE \ uint32_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint32_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_IMINV_UNSIGNED (x, 32) ; // casting #define GB_CAST(z, aij) \ uint32_t z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ uint32_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ uint32_t z = aij ; \ Cx [pC] = GB_IMINV_UNSIGNED (z, 32) ; \ } // true if operator is the identity op with no typecasting #define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \ 0 // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MINV || GxB_NO_UINT32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__minv_uint32_uint32) ( uint32_t *Cx, // Cx and Ax may be aliased const uint32_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; // TODO: if OP is ONE and uniform-valued matrices are exploited, then // do this in O(1) time if (Ab == NULL) { #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST ) GB_memcpy (Cx, Ax, anz * sizeof (uint32_t), nthreads) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { uint32_t aij = Ax [p] ; uint32_t z = aij ; Cx [p] = GB_IMINV_UNSIGNED (z, 32) ; } #endif } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; uint32_t aij = Ax [p] ; uint32_t z = aij ; Cx [p] = GB_IMINV_UNSIGNED (z, 32) ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__minv_uint32_uint32) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
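The GB_OP_IS_IDENTITY_WITH_NO_TYPECAST test above exists so that an identity operator with no typecast degenerates into one bulk copy instead of a per-entry loop. A generic sketch of that compile-time dispatch (hypothetical names; the real kernel uses GB_memcpy and the generated GB_OP macro):

#include <stdint.h>
#include <string.h>

#define OP_IS_IDENTITY 0        /* 1 would select the memcpy fast path */

static uint32_t some_op (uint32_t x) { return ~x ; }   /* placeholder op */

static void unop_apply (uint32_t *Cx, const uint32_t *Ax, int64_t anz,
    int nthreads)
{
    #if OP_IS_IDENTITY
    /* identity with no typecast: one flat copy, no per-entry work */
    memcpy (Cx, Ax, anz * sizeof (uint32_t)) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        Cx [p] = some_op (Ax [p]) ;
    }
    #endif
}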
jacobi-ompacc-opt1.c
// Using target data to promote data allocation to a higher level, enabling reuse across iterations
#include <stdio.h>
#include <math.h>
#include <assert.h>
#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#endif

// Add timing support
#include <sys/time.h>

double time_stamp()
{
  struct timeval t;
  double time;
  gettimeofday(&t,(struct timezone*)NULL);
  time = t.tv_sec + 1.0e-6*t.tv_usec;
  return time;
}

double time1, time2;

void driver(void);
void initialize(void);
void jacobi(void);
void error_check(void);

/************************************************************
* program to solve a finite difference
* discretization of Helmholtz equation :
* (d2/dx2)u + (d2/dy2)u - alpha u = f
* using Jacobi iterative method.
*
* Modified: Sanjiv Shah, Kuck and Associates, Inc. (KAI), 1998
* Author: Joseph Robicheaux, Kuck and Associates, Inc. (KAI), 1998
*
* This C version was translated by
* Chunhua Liao, University of Houston, Jan, 2005
*
* Directives are used in this code to achieve parallelism.
* All do loops are parallelized with default 'static' scheduling.
*
* Input : n - grid dimension in x direction
*         m - grid dimension in y direction
*         alpha - Helmholtz constant (always greater than 0.0)
*         tol   - error tolerance for iterative solver
*         relax - Successive over-relaxation parameter
*         mits  - Maximum iterations for iterative solver
*
* On output
*       : u(n,m) - Dependent variable (solutions)
*       : f(n,m) - Right hand side function
*************************************************************/

#define MSIZE 512
int n,m,mits;
#define REAL float // flexible between float and double
REAL error_ref= 9.212767E-04, resid_ref = 2.355429E-08; // depending on MSIZE!!
REAL tol,relax=1.0,alpha=0.0543;
REAL u[MSIZE][MSIZE],f[MSIZE][MSIZE],uold[MSIZE][MSIZE];
REAL dx,dy;

int main (void)
{
//  float toler;
  /*
     printf("Input n,m (< %d) - grid dimension in x,y direction:\n",MSIZE);
     scanf ("%d",&n);
     scanf ("%d",&m);
     printf("Input tol - error tolerance for iterative solver\n");
     scanf("%f",&toler);
     tol=(double)toler;
     printf("Input mits - Maximum iterations for solver\n");
     scanf("%d",&mits);
   */
  n=MSIZE;
  m=MSIZE;
  tol=0.0000000001;
  mits=5000;
#if 0 // No support yet for concurrent CPU and GPU threads
#ifdef _OPENMP
#pragma omp parallel
  {
#pragma omp single
    printf("Running using %d threads...\n",omp_get_num_threads());
  }
#endif
#endif
  driver ( ) ;
  return 0;
}

/*************************************************************
* Subroutine driver ()
* This is where the arrays are allocated and initialized.
*
* Working variables/arrays
*     dx - grid spacing in x direction
*     dy - grid spacing in y direction
*************************************************************/
void driver( )
{
  initialize();

  time1 = time_stamp();
  /* Solve Helmholtz equation */
  jacobi ();
  time2 = time_stamp();

  printf("------------------------\n");
  printf("Execution time = %f\n",time2-time1);
  /* error_check (n,m,alpha,dx,dy,u,f) */
  error_check ( );
}

/*      subroutine initialize (n,m,alpha,dx,dy,u,f)
******************************************************
* Initializes data
* Assumes exact solution is u(x,y) = (1-x^2)*(1-y^2)
*
******************************************************/
void initialize( )
{
  int i,j, xx,yy;
  //double PI=3.1415926;

  dx = 2.0 / (n-1);
  dy = 2.0 / (m-1);

  /* Initialize initial condition and RHS */
  //#pragma omp parallel for private(xx,yy,j,i)
  for (i=0;i<n;i++)
    for (j=0;j<m;j++)
    {
      xx =(int)( -1.0 + dx * (i-1));
      yy = (int)(-1.0 + dy * (j-1)) ;
      u[i][j] = 0.0;
      f[i][j] = -1.0*alpha *(1.0-xx*xx)*(1.0-yy*yy)\
        - 2.0*(1.0-xx*xx)-2.0*(1.0-yy*yy);
    }
}

/*      subroutine jacobi (n,m,dx,dy,alpha,omega,u,f,tol,maxit)
******************************************************************
* Subroutine HelmholtzJ
* Solves Poisson equation on rectangular grid assuming :
* (1) Uniform discretization in each direction, and
* (2) Dirichlet boundary conditions
*
* Jacobi method is used in this routine
*
* Input : n,m   Number of grid points in the X/Y directions
*         dx,dy Grid spacing in the X/Y directions
*         alpha Helmholtz eqn. coefficient
*         omega Relaxation factor
*         f(n,m) Right hand side function
*         u(n,m) Dependent variable/Solution
*         tol    Tolerance for iterative solver
*         maxit  Maximum number of iterations
*
* Output : u(n,m) - Solution
*****************************************************************/
void jacobi( )
{
  REAL omega;
  int i,j,k;
  REAL error,resid,ax,ay,b;
  //      double error_local;

  //      float ta,tb,tc,td,te,ta1,ta2,tb1,tb2,tc1,tc2,td1,td2;
  //      float te1,te2;
  //      float second;

  omega=relax;
  /*
   * Initialize coefficients */

  ax = 1.0/(dx*dx); /* X-direction coef */
  ay = 1.0/(dy*dy); /* Y-direction coef */
  b = -2.0/(dx*dx)-2.0/(dy*dy) - alpha; /* Central coeff */

  error = 10.0 * tol;
  k = 1;

  // An optimization over the naive version: hoist the device data mapping out
  // of the while loop so u, f and uold are allocated and transferred once and
  // reused by every iteration. The map clauses differ from the per-kernel
  // ones because the enclosing scope is now the whole iteration loop:
#pragma omp target data map(to:n, m, omega, ax, ay, b, f[0:n][0:m]) map(tofrom:u[0:n][0:m]) map(alloc:uold[0:n][0:m])
  while ((k<=mits)&&(error>tol))
  {
    error = 0.0;

    /* Copy new solution into old */
    //#pragma omp parallel
    //  {
#pragma omp target map(to:n, m, u[0:n][0:m]) map(from:uold[0:n][0:m])
#pragma omp parallel for private(j,i)
    for(i=0;i<n;i++)
      for(j=0;j<m;j++)
        uold[i][j] = u[i][j];

#pragma omp target map(to:n, m, omega, ax, ay, b, f[0:n][0:m], uold[0:n][0:m]) map(from:u[0:n][0:m])
#pragma omp parallel for private(resid,j,i) reduction(+:error) // nowait
    for (i=1;i<(n-1);i++)
      for (j=1;j<(m-1);j++)
      {
        resid = (ax*(uold[i-1][j] + uold[i+1][j])\
            + ay*(uold[i][j-1] + uold[i][j+1])+ b * uold[i][j] - f[i][j])/b;

        u[i][j] = uold[i][j] - omega * resid;
        error = error + resid*resid ;
      }
    //  } /* omp end parallel */

    /* Error check */
    if (k%500==0)
      printf("Finished %d iteration with error =%f\n",k, error);
    error = sqrt(error)/(n*m);

    k = k + 1;
  } /* End iteration loop */

  printf("Total Number of Iterations:%d\n",k);
  printf("Residual:%E\n", error);
  printf("Residual_ref :%E\n", resid_ref);
  printf ("Diff ref=%E\n", fabs(error-resid_ref));
  assert (fabs(error-resid_ref) < 1E-14);
}

/*      subroutine error_check (n,m,alpha,dx,dy,u,f)
************************************************************
* Checks error between numerical and exact solution
*
************************************************************/
void error_check ( )
{
  int i,j;
  REAL xx,yy,temp,error;

  dx = 2.0 / (n-1);
  dy = 2.0 / (m-1);
  error = 0.0 ;

  //#pragma omp parallel for private(xx,yy,temp,j,i) reduction(+:error)
  for (i=0;i<n;i++)
    for (j=0;j<m;j++)
    {
      xx = -1.0 + dx * (i-1);
      yy = -1.0 + dy * (j-1);
      temp = u[i][j] - (1.0-xx*xx)*(1.0-yy*yy);
      error = error + temp*temp;
    }
  error = sqrt(error)/(n*m);
  printf("Solution Error :%E \n",error);
  printf("Solution Error Ref :%E \n",error_ref);
  printf ("Diff ref=%E\n", fabs(error-error_ref));
  assert (fabs(error-error_ref) < 1E-14);
}
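The hoisted target data region above is the key idea of this variant: map once, then run many target regions against the device copies. Stripped of the solver, the pattern looks like this (a minimal sketch with hypothetical array names):

#include <stdio.h>

#define N 1024

int main(void)
{
    static double a[N], b[N];
    for (int i = 0; i < N; i++) a[i] = i;

    /* One enclosing data region: a and b stay resident on the device for
     * all three kernel launches below; nothing is re-transferred between
     * them. b is copied back only when the data region ends. */
    #pragma omp target data map(to:a[0:N]) map(from:b[0:N])
    for (int it = 0; it < 3; it++) {
        #pragma omp target
        #pragma omp parallel for
        for (int i = 0; i < N; i++)
            b[i] = a[i] * 2.0;      /* uses the device copies mapped above */
    }

    printf("b[10] = %f\n", b[10]);
    return 0;
}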
GrB_BinaryOp_wait.c
//------------------------------------------------------------------------------ // GrB_BinaryOp_wait: wait for a user-defined GrB_BinaryOp to complete //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // In SuiteSparse:GraphBLAS, a user-defined GrB_BinaryOp has no pending // operations to wait for. All this method does is verify that the op is // properly initialized, and then it does an OpenMP flush. #include "GB.h" GrB_Info GrB_BinaryOp_wait // no work, just check if the GrB_BinaryOp is valid ( GrB_BinaryOp *op ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- #pragma omp flush GB_WHERE1 ("GrB_BinaryOp_wait (&op)") ; GB_RETURN_IF_NULL (op) ; GB_RETURN_IF_NULL_OR_FAULTY (*op) ; //-------------------------------------------------------------------------- // return result //-------------------------------------------------------------------------- #pragma omp flush return (GrB_SUCCESS) ; }
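As the comments note, this call is only a validity check plus a flush, so typical usage is to invoke it once after creating a user-defined operator. A hedged usage sketch (the myadd function and make_op wrapper are hypothetical; the GrB_BinaryOp_new and GrB_BinaryOp_wait signatures follow this GraphBLAS version):

#include "GraphBLAS.h"

/* user-defined z = x + y on doubles */
void myadd (void *z, const void *x, const void *y)
{
    (*(double *) z) = (*(const double *) x) + (*(const double *) y) ;
}

/* create the op, then wait on it before handing it to other threads */
GrB_Info make_op (GrB_BinaryOp *op)
{
    GrB_Info info = GrB_BinaryOp_new (op, myadd, GrB_FP64, GrB_FP64, GrB_FP64) ;
    if (info != GrB_SUCCESS) return (info) ;
    return (GrB_BinaryOp_wait (op)) ;   /* validity check + flush, per above */
}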
dlthread.c
/**
 * @file dlthread.c
 * @brief Functions for thread communicators.
 * @author Dominique LaSalle <lasalle@cs.umn.edu>
 * Copyright (c) 2014-2015, Dominique LaSalle
 * @version 1
 * @date 2014-12-05
 */

#ifndef DLTHREAD_C
#define DLTHREAD_C

#include "dlthread.h"

/******************************************************************************
* THREAD LOCAL STORAGE ********************************************************
******************************************************************************/

#if __STDC_VERSION__ >= 201112L
/* C11 -- the keyword form needs no extra header */
#define THREAD_LOCAL _Thread_local
#elif defined(__GNUC__) || defined (__GNUG__)
/* GNU Compliant */
#define THREAD_LOCAL __thread
#elif defined(_MSC_VER)
/* Microsoft */
#define THREAD_LOCAL __declspec(thread)
#else
/* break things */
THIS WILL NOT WORK WITHOUT SOME TYPE OF THREAD LOCAL STORAGE -- TRY A
DIFFERENT COMPILER
#endif

/******************************************************************************
* OMP/PTHREAD SETUP ***********************************************************
******************************************************************************/

#ifndef CACHE_LINE_SIZE
#define CACHE_LINE_SIZE 64
#endif

#ifdef __DOMLIB_USE_PTHREADS
#include <pthread.h>

typedef pthread_barrier_t barrier_t;

/* perhaps change these to inline functions to eliminate possible warnings */
#define init_lock(lock) pthread_mutex_init(lock,NULL)
#define set_lock(lock) pthread_mutex_lock(lock)
#define unset_lock(lock) pthread_mutex_unlock(lock)
#define free_lock(lock) pthread_mutex_destroy(lock)
#define init_barrier(bar,nthreads) pthread_barrier_init(bar,NULL,nthreads)
#define free_barrier(bar) pthread_barrier_destroy(bar)

static inline int wait_barrier(
    barrier_t * const bar,
    size_t const myid)
{
  return pthread_barrier_wait(bar);
}

#else
#include <omp.h>

static size_t const IDX_OFFSET = CACHE_LINE_SIZE / sizeof(int);

typedef struct barrier_t {
  size_t nthreads;
  int volatile * vec;
} barrier_t;

/* perhaps change these to inline functions to eliminate possible warnings */
#define init_lock(lock) omp_init_lock(lock)
#define set_lock(lock) omp_set_lock(lock)
#define unset_lock(lock) omp_unset_lock(lock)
#define free_lock(lock) omp_destroy_lock(lock)

static inline void init_barrier(
    barrier_t * const bar,
    size_t const nthreads)
{
  bar->nthreads = nthreads;
  bar->vec = calloc(nthreads,CACHE_LINE_SIZE);
}

static inline void free_barrier(
    barrier_t * const bar)
{
  size_t i;
  int volatile * mybar;

  mybar = bar->vec;

  /* check to make sure bar is not in use */
  for (i=0;i<bar->nthreads;++i) {
    while (mybar[i*IDX_OFFSET] != 0) {
      _mm_pause();
    }
  }

  /* keep the compiler from whining about freeing volatile memory */
  dl_free((void*)bar->vec);
}

static inline void wait_barrier(
    barrier_t * const bar,
    size_t const myid)
{
  int volatile * mybar;
  size_t lc, rc;

  DL_ASSERT(myid<bar->nthreads,"Invalid thread num %zu/%zu in barrier\n", \
      myid,bar->nthreads);

  if (bar->nthreads == 1) {
    /* don't waste time for a single thread */
    return;
  } else if ((int)bar->nthreads == omp_get_num_threads()) {
    /* all threads are in this barrier -- use the native barrier and skip
     * the tree barrier below */
    #pragma omp barrier
    return;
  }

  mybar = bar->vec;

  lc = (myid*2)+1;
  rc = (myid*2)+2;
  if (lc >= bar->nthreads) {
    lc = 0;
  }
  if (rc >= bar->nthreads) {
    rc = 0;
  }

  /* wait for my children to reach the barrier */
  while ((lc && mybar[lc*IDX_OFFSET] != 1) || \
      (rc && mybar[rc*IDX_OFFSET] != 1)) {
    _mm_pause();
  }

  /* mark that I have reached the barrier */
  mybar[myid*IDX_OFFSET] = 1;

  /* wait for the root thread to be finished */
  while (mybar[0] != 1) {
    _mm_pause();
  }

  /* barrier achieved -- now reset */
  /* wait for my children to reset */
  while ((lc && mybar[lc*IDX_OFFSET] == 1) || \
      (rc && mybar[rc*IDX_OFFSET] == 1)) {
    _mm_pause();
  }

  /* reset myself */
  mybar[myid*IDX_OFFSET] = 2;

  /* wait for the root to reset */
  while (mybar[0] == 1) {
    _mm_pause();
  }

  /* so that when a barrier is free'd, we can be sure all threads are done
   * using the array */
  mybar[myid*IDX_OFFSET] = 0;

  /* make sure we protect against synchronization breaking optimizations */
  __asm__ volatile("" : : : "memory");
}

#endif

/******************************************************************************
* TYPES ***********************************************************************
******************************************************************************/

typedef struct comm_t {
  int in_use;
  size_t nthreads;
  size_t bufsize;
  size_t nlarray;
  /* lock array for each thread */
  void * buffer;
  dlthread_lock_t loc;
  dlthread_lock_t ** larray;
  barrier_t bar;
} comm_t;

typedef struct thread_arg_t {
  size_t id;
  void * ptr;
  void (*funptr)(void*);
} thread_arg_t;

/******************************************************************************
* CONSTANTS *******************************************************************
******************************************************************************/

static size_t const __DEFAULT_BYTES_PER_THREAD = 128;
#define __MAX_NCOMMS 1024

/******************************************************************************
* VARIABLES *******************************************************************
******************************************************************************/

static dlthread_lock_t * ncomms_lock = NULL;
static dlthread_comm_t last_free_comm = 1; /* space for root comm */
static comm_t my_comms[__MAX_NCOMMS];

static THREAD_LOCAL size_t my_ids[__MAX_NCOMMS];
static THREAD_LOCAL size_t __local_buffer_size = 0;
static THREAD_LOCAL void * __local_buffer = NULL;

/******************************************************************************
* PRIVATE FUNCTIONS ***********************************************************
******************************************************************************/

static void __config_comm(
    comm_t * const comm,
    size_t const nthreads)
{
  size_t maxthreads;

  comm->in_use = 1;
  comm->nthreads = nthreads;
  comm->larray = NULL;

  /* handle data types up to size 16 and round up to a power of two */
  maxthreads = size_uppow2(nthreads);
  comm->bufsize = size_uppow2((__DEFAULT_BYTES_PER_THREAD*maxthreads) + 4096);
  comm->buffer = malloc(comm->bufsize);

  init_lock(&comm->loc);

  init_barrier(&comm->bar,nthreads);
}

/* returns NULL (rather than void) so the function can be passed directly to
 * pthread_create() when __DOMLIB_USE_PTHREADS is defined */
static void * __thread_start(
    void * const ptr)
{
  size_t myid;
  thread_arg_t * arg;

  arg = ptr;
  myid = arg->id;

  /* set my thread id for this communicator */
  my_ids[DLTHREAD_COMM_ROOT] = myid;

  arg->funptr(arg->ptr);

  return NULL;
}

/******************************************************************************
* PUBLIC FUNCTIONS ************************************************************
******************************************************************************/

void dlthread_launch(
    size_t const nthreads,
    void (*funptr)(void*),
    void * const ptr)
{
  size_t myid, i;
  comm_t * comm;

  comm = my_comms+DLTHREAD_COMM_ROOT;

  ncomms_lock = malloc(sizeof(dlthread_lock_t));
  init_lock(ncomms_lock);

  __config_comm(comm,nthreads);

#ifdef __DOMLIB_USE_PTHREADS
  pthread_t * threads;
  thread_arg_t * args;

  threads = malloc(sizeof(pthread_t)*nthreads);
  args = malloc(sizeof(thread_arg_t)*nthreads);

  for (i=0;i<nthreads;++i) {
    args[i].id = i;
    args[i].ptr = ptr;
    args[i].funptr = funptr;
pthread_create(threads+i,NULL,&__thread_start,args+i); } for (i=0;i<nthreads;++i) { pthread_join(threads[i],NULL); } dl_free(threads); dl_free(args); #else #pragma omp parallel num_threads(nthreads) { thread_arg_t arg; arg.id = omp_get_thread_num(); arg.ptr = ptr; arg.funptr = funptr; __thread_start(&arg); } #endif if (comm->larray) { for (myid=0;myid<nthreads;++myid) { /* destroy locks if they exist */ for (i=0;i<comm->nlarray;++i) { free_lock(comm->larray[myid]+i); } dl_free(comm->larray[myid]); } dl_free(comm->larray); } dl_free(comm->buffer); free_barrier(&(comm->bar)); free_lock(&(comm->loc)); comm->in_use = 0; free_lock(ncomms_lock); dl_free(ncomms_lock); } void dlthread_exclude( dlthread_comm_t const comm_idx) { comm_t * gcomm; if (comm_idx != DLTHREAD_COMM_SINGLE) { gcomm = my_comms+comm_idx; set_lock(&gcomm->loc); } } void dlthread_unexclude( dlthread_comm_t const comm_idx) { comm_t * gcomm; if (comm_idx != DLTHREAD_COMM_SINGLE) { gcomm = my_comms+comm_idx; unset_lock(&gcomm->loc); } } void dlthread_init_lock( dlthread_lock_t * const lock) { init_lock(lock); } void dlthread_free_lock( dlthread_lock_t * lock) { free_lock(lock); } void dlthread_set_lock( dlthread_lock_t * const lock) { set_lock(lock); } void dlthread_unset_lock( dlthread_lock_t * const lock) { unset_lock(lock); } void dlthread_init_locks( size_t const lpt, dlthread_comm_t const comm_idx) { size_t i, myid, nthreads; comm_t * gcomm; if (comm_idx != DLTHREAD_COMM_SINGLE) { myid = dlthread_get_id(comm_idx); nthreads = dlthread_get_nthreads(comm_idx); gcomm = my_comms+comm_idx; if (myid == 0) { gcomm->larray = malloc(sizeof(dlthread_lock_t*)*nthreads); gcomm->nlarray = lpt; } dlthread_barrier(comm_idx); gcomm->larray[myid] = malloc(sizeof(dlthread_lock_t)*lpt); for (i=0;i<lpt;++i) { init_lock(gcomm->larray[myid]+i); } dlthread_barrier(comm_idx); } } void dlthread_lock_index( size_t const tid, size_t const idx, dlthread_comm_t const comm_idx) { size_t i; comm_t * gcomm; if (comm_idx != DLTHREAD_COMM_SINGLE) { gcomm = my_comms+comm_idx; i = idx % gcomm->nlarray; set_lock(gcomm->larray[tid]+i); } } void dlthread_unlock_index( size_t const tid, size_t const idx, dlthread_comm_t const comm_idx) { size_t i; comm_t * gcomm; if (comm_idx != DLTHREAD_COMM_SINGLE) { gcomm = my_comms+comm_idx; i = idx % gcomm->nlarray; unset_lock(gcomm->larray[tid]+i); } } dlthread_comm_t dlthread_comm_split( size_t group, size_t ngroups, dlthread_comm_t const comm_idx) { size_t i, lid, cidx, myid, nthreads; size_t volatile * tid; comm_t * lcomm; dlthread_comm_t volatile * gcom; dlthread_comm_t cid; if (comm_idx != DLTHREAD_COMM_SINGLE) { myid = dlthread_get_id(comm_idx); nthreads = dlthread_get_nthreads(comm_idx); DL_ASSERT(group < ngroups,"Invalid group %zu/%zu for thread %zu/%zu\n", \ group,ngroups,myid,nthreads); gcom = dlthread_get_buffer((sizeof(dlthread_comm_t)*ngroups) + \ (sizeof(size_t)*nthreads),comm_idx); tid = (size_t*)(gcom+ngroups); if (myid == 0) { /* number the new communicators */ set_lock(ncomms_lock); for (i=0;i<ngroups;++i) { /* find the next free communicator */ while (my_comms[last_free_comm].in_use == 1) { ++last_free_comm; } gcom[i] = last_free_comm++; /* initialize my comm */ my_comms[gcom[i]].nthreads = 0; my_comms[gcom[i]].in_use = 1; } unset_lock(ncomms_lock); } /* I use an alarming number of barriers here -- someday reduce this */ dlthread_barrier(comm_idx); cid = gcom[group]; DL_ASSERT(cid < __MAX_NCOMMS,"Exceeded maximum number of communicators\n"); lcomm = my_comms+cid; tid[myid] = group; dlthread_barrier(comm_idx); 
if (myid == 0) { /* number the threads per communicator */ for (i=0;i<nthreads;++i) { cidx = gcom[tid[i]]; tid[i] = my_comms[cidx].nthreads++; } } dlthread_barrier(comm_idx); lid = tid[myid]; if (lid == 0) { /* root for each comm */ __config_comm(lcomm,lcomm->nthreads); } my_ids[cid] = lid; dlthread_barrier(comm_idx); dprintf("[%zu:%zu] new communicator %zu with %zu threads\n",myid,lid, \ (size_t)cid,lcomm->nthreads); } else { cid = DLTHREAD_COMM_SINGLE; } return cid; } void dlthread_comm_finalize( dlthread_comm_t const comm_idx) { size_t i, myid; comm_t * comm; if (comm_idx != DLTHREAD_COMM_SINGLE) { myid = dlthread_get_id(comm_idx); dlthread_barrier(comm_idx); comm = my_comms+comm_idx; if (comm->larray) { /* destroy locks if they exist */ for (i=0;i<comm->nlarray;++i) { free_lock(comm->larray[myid]+i); } dl_free(comm->larray[myid]); dlthread_barrier(comm_idx); if (myid == 0) { dl_free(comm->larray); } } if (myid == 0) { dl_free(comm->buffer); free_barrier(&(comm->bar)); free_lock(&(comm->loc)); comm->in_use = 0; set_lock(ncomms_lock); if (comm_idx < last_free_comm) { last_free_comm = comm_idx; } unset_lock(ncomms_lock); } } else { /* clear this threads local buffer if it exists */ if (__local_buffer) { dl_free(__local_buffer); __local_buffer = NULL; __local_buffer_size = 0; } } } size_t dlthread_get_id( dlthread_comm_t const comm) { if (comm != DLTHREAD_COMM_SINGLE) { return my_ids[comm]; } else { return 0; } } size_t dlthread_get_nthreads( dlthread_comm_t const comm) { if (comm != DLTHREAD_COMM_SINGLE) { return my_comms[comm].nthreads; } else { return 1; } } void * dlthread_get_shmem( size_t nbytes, dlthread_comm_t const comm_idx) { size_t myid; void * ptr; comm_t * comm; if (comm_idx != DLTHREAD_COMM_SINGLE) { myid = dlthread_get_id(comm_idx); comm = my_comms+comm_idx; DL_ASSERT(comm->buffer != NULL,"Null buffer on communicator %zu\n", \ (size_t)comm_idx); if (myid == 0) { ptr = malloc(nbytes); ((void**)comm->buffer)[0] = ptr; } dlthread_barrier(comm_idx); ptr = ((void**)comm->buffer)[0]; dlthread_barrier(comm_idx); } else { ptr = malloc(nbytes); } return ptr; } void dlthread_free_shmem( void * ptr, dlthread_comm_t const comm_idx) { size_t const myid = dlthread_get_id(comm_idx); dlthread_barrier(comm_idx); if (myid == 0) { dl_free(ptr); } } void dlthread_barrier( dlthread_comm_t const comm_idx) { if (comm_idx != DLTHREAD_COMM_SINGLE) { wait_barrier(&(my_comms[comm_idx].bar),dlthread_get_id(comm_idx)); } } void * dlthread_get_buffer( size_t const n, dlthread_comm_t const comm_idx) { void * buffer; comm_t * comm; size_t const myid = dlthread_get_id(comm_idx); if (comm_idx != DLTHREAD_COMM_SINGLE) { comm = my_comms+comm_idx; if (comm->bufsize < n) { dlthread_barrier(comm_idx); if (myid == 0) { dl_free(comm->buffer); comm->buffer = malloc(n); comm->bufsize = n; } dlthread_barrier(comm_idx); } buffer = comm->buffer; } else { if (__local_buffer_size < n) { __local_buffer = realloc(__local_buffer,n); __local_buffer_size = n; } buffer = __local_buffer; } return buffer; } #endif
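A quick usage sketch of the communicator API above (a hypothetical driver, not part of domlib): launch a team, have each thread report its rank on the root communicator, and synchronize before the team is torn down.

#include <stdio.h>
#include "dlthread.h"

static void hello(void * const arg)
{
  size_t const myid = dlthread_get_id(DLTHREAD_COMM_ROOT);
  size_t const nthreads = dlthread_get_nthreads(DLTHREAD_COMM_ROOT);

  printf("[%zu/%zu] %s\n", myid, nthreads, (char const *)arg);

  /* everyone meets here before dlthread_launch() frees the communicator */
  dlthread_barrier(DLTHREAD_COMM_ROOT);
}

int main(void)
{
  dlthread_launch(4, &hello, "launched via dlthread");
  return 0;
}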
sapB_fmt_plug.c
/*
 * this is a SAP-BCODE plugin for john the ripper.
 * tested on linux/x86 only, rest is up to you.. at least, someone did the reversing :-)
 *
 * please note: this code is in a "works for me"-state, feel free to modify/speed up/clean/whatever it...
 *
 * (c) x7d8 sap loverz, public domain, btw
 * cheers: see test-cases.
 *
 * Heavily modified by magnum 2011-2012 for performance and for SIMD, OMP and
 * encodings support. Copyright (c) 2011, 2012 magnum, and it is hereby released
 * to the general public under the following terms: Redistribution and use in
 * source and binary forms, with or without modification, are permitted.
 */

#if FMT_EXTERNS_H
extern struct fmt_main fmt_sapB;
#elif FMT_REGISTERS_H
john_register_one(&fmt_sapB);
#else

#include <string.h>
#include <ctype.h>

#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "options.h"
#include "unicode.h"
#include "md5.h"

#define FORMAT_LABEL "sapb"
#define FORMAT_NAME "SAP CODVN B (BCODE)"

#ifdef MMX_COEF
#define NBKEYS (MMX_COEF * MD5_SSE_PARA)
#define DO_MMX_MD5(in, out) SSEmd5body(in, (unsigned int*)out, NULL, SSEi_MIXED_IN)
#endif
#include "sse-intrinsics.h"

#define ALGORITHM_NAME "MD5 " MD5_ALGORITHM_NAME

#if defined(_OPENMP)
#include <omp.h>
static unsigned int omp_t = 1;
#ifdef MMX_COEF
#define OMP_SCALE 128
#else
#define OMP_SCALE 2048
#endif
#endif

#include "memdbg.h"

#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH 0

#define SALT_FIELD_LENGTH 40 /* the max listed username length */
#define SALT_LENGTH 12 /* the max used username length */
#define PLAINTEXT_LENGTH 8 /* password length, max 8 chars */
#define CIPHERTEXT_LENGTH (SALT_FIELD_LENGTH + 1 + 16) /* SALT + $ + 2x8 bytes for BCODE-representation */

#define BINARY_SIZE 8 /* half of md5 */
#define BINARY_ALIGN 4
#define SALT_SIZE sizeof(struct saltstruct)
#define SALT_ALIGN 4

#ifdef MMX_COEF
#define MIN_KEYS_PER_CRYPT NBKEYS
#define MAX_KEYS_PER_CRYPT NBKEYS
#define GETPOS(i, index) ( (index&(MMX_COEF-1))*4 + ((i)&(0xffffffff-3))*MMX_COEF + ((i)&3) + (index>>(MMX_COEF>>1))*16*MMX_COEF*4 )
#define GETOUTPOS(i, index) ( (index&(MMX_COEF-1))*4 + ((i)&(0xffffffff-3))*MMX_COEF + ((i)&3) + (index>>(MMX_COEF>>1))*16*MMX_COEF)
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif

#define BCODE_ARRAY_LENGTH 3*16
static const unsigned char bcodeArr[BCODE_ARRAY_LENGTH] = {
	0x14, 0x77, 0xf3, 0xd4, 0xbb, 0x71, 0x23, 0xd0, 0x03, 0xff, 0x47, 0x93,
	0x55, 0xaa, 0x66, 0x91, 0xf2, 0x88, 0x6b, 0x99, 0xbf, 0xcb, 0x32, 0x1a,
	0x19, 0xd9, 0xa7, 0x82, 0x22, 0x49, 0xa2, 0x51, 0xe2, 0xb7, 0x33, 0x71,
	0x8b, 0x9f, 0x5d, 0x01, 0x44, 0x70, 0xae, 0x11, 0xef, 0x28, 0xf0, 0x0d };

/* char transition table for BCODE (from disp+work) */
static const unsigned char transtable[] = {
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	0x3f, 0x40, 0x41, 0x50, 0x43, 0x44, 0x45, 0x4b, 0x47, 0x48, 0x4d, 0x4e, 0x54, 0x51, 0x53, 0x46,
	0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x56, 0x55, 0x5c, 0x49, 0x5d, 0x4a,
	0x42, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x58, 0x5b, 0x59, 0xff, 0x52,
//	0x4c, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29,
	0x4c, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
//	0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x57, 0x5e, 0x5a, 0x4f, 0xff,
	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x57, 0x5e, 0x5a, 0x4f, 0xff,
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

// For backwards compatibility, we must support salts padded with spaces to a field width of 40
static struct fmt_tests tests[] = {
	{"DDIC$C94E2F7DD0178374", "DDIC"},
	// While "X" and "U" are not valid SAP passwords, they might still occur
	// if passwords longer than 8 characters are allowed, and if the CODVN B
	// password is calculated and stored in addition to the CODVN F or
	// CODVN H password.
	// Although a user picking "X Y" as a password is probably
	// not very likely.
	{"F $E3A65AAA9676060F", "X"},
	// the 9 character password CYBERPUNK will be truncated to CYBERPUN
	{"JOHNNY $7F7207932E4DE471", "CYBERPUNK"},
	{"VAN $487A2A40A7BA2258", "HAUSER"},
	{"ROOT $8366A4E9E6B72CB0", "KID"},
	{"MAN $9F48E7CE5B184D2E", "U"},
	// "-------" is not a valid SAP password (first 3 characters are
	// identical)
	// ("^^^^^^^" would be allowed, since "^" also replaces arbitrary
	// non-ascii characters, as far as the CODVN B hash algorithm is
	// concerned)
	// {"------------$2CF190AF13E858A2", "-------"},
	{"------------$058DE95926E00F32", "--+----"},
	{"SAP*$7016BFF7C5472F1B", "MASTER"},
	// password DOLLAR$$$--- will be truncated to DOLLAR$$
	{"DOLLAR$$$---$C3413C498C48EB67", "DOLLAR$$$---"},
	// Trigger suspected over-run of sum20. We do behave like SAP so it's
	// not a problem.
{"12850413$1470EF2F683C956D", "46813230"}, {NULL} }; #define TEMP_ARRAY_SIZE 4*16 #define DEFAULT_OFFSET 15 static char (*saved_plain)[PLAINTEXT_LENGTH + 1]; static int (*keyLen); #ifdef MMX_COEF static unsigned char (*saved_key); static unsigned char (*interm_key); static unsigned char (*crypt_key); static unsigned int (*clean_pos); #else static ARCH_WORD_32 (*crypt_key)[BINARY_SIZE/sizeof(ARCH_WORD_32)]; static char (*saved_key)[PLAINTEXT_LENGTH + 1]; #endif static struct saltstruct { unsigned int l; unsigned char s[SALT_LENGTH]; } *cur_salt; static void init(struct fmt_main *self) { static int warned = 0; if (pers_opts.target_enc == UTF_8 && warned++ == 0) fprintf(stderr, "Warning: SAP-B format should never be UTF-8.\nUse --target-encoding=iso-8859-1 or whatever is applicable.\n"); #if defined (_OPENMP) omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt = omp_t * MIN_KEYS_PER_CRYPT; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt = omp_t * MAX_KEYS_PER_CRYPT; #endif #ifdef MMX_COEF saved_key = mem_calloc_tiny(64 * self->params.max_keys_per_crypt, MEM_ALIGN_SIMD); interm_key = mem_calloc_tiny(64 * self->params.max_keys_per_crypt, MEM_ALIGN_SIMD); crypt_key = mem_calloc_tiny(16 * self->params.max_keys_per_crypt, MEM_ALIGN_SIMD); clean_pos = mem_calloc_tiny(sizeof(*clean_pos) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD); #else saved_key = mem_calloc_tiny(sizeof(*saved_key) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD); crypt_key = mem_calloc_tiny(sizeof(*crypt_key) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD); #endif saved_plain = mem_calloc_tiny(sizeof(*saved_plain) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD); keyLen = mem_calloc_tiny(sizeof(*keyLen) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD); } static int valid(char *ciphertext, struct fmt_main *self) { int i; char *p; if (!ciphertext) return 0; p = strrchr(ciphertext, '$'); if (!p) return 0; if (p - ciphertext > SALT_FIELD_LENGTH) return 0; if (strlen(&p[1]) != BINARY_SIZE * 2) return 0; for (i = 0; i < p - ciphertext; i++) { // even those lower case non-ascii characters with a // corresponding upper case character could be rejected if (ciphertext[i] >= 'a' && ciphertext[i] <= 'z') return 0; // SAP user names cannot be longer than 12 characters if (i >= SALT_LENGTH && ciphertext[i] != ' ') return 0; } // SAP user name cannot start with ! or ? if (ciphertext[0] == '!' || ciphertext[0] == '?') return 0; p++; // SAP and sap2john.pl always use upper case A-F for hashes, // so don't allow a-f for (i = 0; i < BINARY_SIZE * 2; i++) if (!(((p[i]>='0' && p[i]<='9')) || ((p[i]>='A' && p[i]<='F')) )) return 0; return 1; } static void set_salt(void *salt) { cur_salt = salt; } static void set_key(char *key, int index) { memcpy(saved_plain[index], key, PLAINTEXT_LENGTH); keyLen[index] = -1; } static char *get_key(int index) { int i; // Work-around for new self-test. 
if (keyLen[index] == -1) keyLen[index] = strlen(saved_plain[index]); for (i = 0; i < keyLen[index]; i++) { if (saved_plain[index][i] >= 'a' && saved_plain[index][i] <= 'z') saved_plain[index][i] ^= 0x20; else if (saved_plain[index][i] & 0x80) saved_plain[index][i] = '^'; } saved_plain[index][i] = 0; return saved_plain[index]; } static int cmp_all(void *binary, int count) { #ifdef MMX_COEF unsigned int x,y=0; #ifdef _OPENMP for(;y<MD5_SSE_PARA*omp_t;y++) #else for(;y<MD5_SSE_PARA;y++) #endif for(x = 0; x < MMX_COEF; x++) { if( ((ARCH_WORD_32*)binary)[0] == ((ARCH_WORD_32*)crypt_key)[y*MMX_COEF*4+x] ) return 1; } return 0; #else int index; for (index = 0; index < count; index++) if (!memcmp(binary, crypt_key[index], BINARY_SIZE)) return 1; return 0; #endif } static int cmp_exact(char *source, int index) { return 1; } static int cmp_one(void * binary, int index) { #ifdef MMX_COEF unsigned int i,x,y; x = index&(MMX_COEF-1); y = index/MMX_COEF; for(i=0;i<(BINARY_SIZE/4);i++) if ( ((ARCH_WORD_32*)binary)[i] != ((ARCH_WORD_32*)crypt_key)[y*MMX_COEF*4+i*MMX_COEF+x] ) return 0; return 1; #else return !memcmp(binary, crypt_key[index], BINARY_SIZE); #endif } static unsigned int walld0rf_magic(const int index, const unsigned char *temp_key, unsigned char *destArray) { unsigned int sum20, I1, I2, I3; const int len = keyLen[index]; #ifdef MMX_COEF #define key(i) saved_key[GETPOS(i, index)] #else #define key(i) saved_key[index][i] #endif // some magic in between....yes, byte 4 is ignored... // sum20 will be between 0x20 and 0x2F //sum20 = temp_key[5]%4 + temp_key[3]%4 + temp_key[2]%4 + temp_key[1]%4 + temp_key[0]%4 + 0x20; sum20 = *(unsigned int*)temp_key & 0x03030303; sum20 = (unsigned char)((sum20 >> 24) + (sum20 >> 16) + (sum20 >> 8) + sum20); sum20 += (temp_key[5] & 3) | 0x20; // Some unrolling if (temp_key[15] & 0x01) { destArray[0] = bcodeArr[47]; I2 = 1; } else { I2 = 0; } destArray[I2++] = key(0); destArray[I2++] = cur_salt->s[0]; destArray[I2] = bcodeArr[I2-2]; destArray[++I2] = 0; I2++; if( len >= 6) { I1 = 6; if( cur_salt->l >= 4 ) { // key >= 6 bytes, salt >= 4 bytes if (temp_key[14] & 0x01) destArray[I2++] = bcodeArr[46]; destArray[I2++] = key(1); destArray[I2++] = cur_salt->s[1]; destArray[I2] = bcodeArr[I2-4]; destArray[++I2] = 0; I2++; if (temp_key[13] & 0x01) destArray[I2++] = bcodeArr[45]; destArray[I2++] = key(2); destArray[I2++] = cur_salt->s[2]; destArray[I2] = bcodeArr[I2-6]; destArray[++I2] = 0; I2++; if (temp_key[12] & 0x01) destArray[I2++] = bcodeArr[44]; destArray[I2++] = key(3); destArray[I2++] = cur_salt->s[3]; destArray[I2] = bcodeArr[I2-8]; destArray[++I2] = 0; I2++; I3 = 4; if (temp_key[DEFAULT_OFFSET - 4] & 0x01) destArray[I2++] = bcodeArr[43]; destArray[I2++] = key(4); if (4 < cur_salt->l) destArray[I2++] = cur_salt->s[I3++]; destArray[I2] = bcodeArr[I2 - 5 - I3]; destArray[++I2] = 0; I2++; if (temp_key[DEFAULT_OFFSET - 5] & 0x01) destArray[I2++] = bcodeArr[42]; destArray[I2++] = key(5); if (5 < cur_salt->l) destArray[I2++] = cur_salt->s[I3++]; destArray[I2] = bcodeArr[I2 - 6 - I3]; destArray[++I2] = 0; I2++; if (6 < len) { if (temp_key[DEFAULT_OFFSET - 6] & 0x01) destArray[I2++] = bcodeArr[BCODE_ARRAY_LENGTH - 7]; destArray[I2++] = key(6); I1++; } if (6 < cur_salt->l) destArray[I2++] = cur_salt->s[I3++]; } else { // Key >= 6 bytes, salt < 4 Bytes I3 = 1; if (temp_key[DEFAULT_OFFSET - 1] & 0x01) destArray[I2++] = bcodeArr[BCODE_ARRAY_LENGTH - 2]; destArray[I2++] = key(1); if (1 < cur_salt->l) destArray[I2++] = cur_salt->s[I3++]; destArray[I2] = bcodeArr[I2 - 2 - I3]; 
destArray[++I2] = 0; I2++; if (temp_key[DEFAULT_OFFSET - 2] & 0x01) destArray[I2++] = bcodeArr[BCODE_ARRAY_LENGTH - 3]; destArray[I2++] = key(2); if (2 < cur_salt->l) destArray[I2++] = cur_salt->s[I3++]; destArray[I2] = bcodeArr[I2 - 3 - I3]; destArray[++I2] = 0; I2++; if (temp_key[DEFAULT_OFFSET - 3] & 0x01) destArray[I2++] = bcodeArr[BCODE_ARRAY_LENGTH - 4]; destArray[I2++] = key(3); destArray[I2] = bcodeArr[I2 - 4 - I3]; destArray[++I2] = 0; I2++; if (temp_key[DEFAULT_OFFSET - 4] & 0x01) destArray[I2++] = bcodeArr[BCODE_ARRAY_LENGTH - 5]; destArray[I2++] = key(4); destArray[I2] = bcodeArr[I2 - 5 - I3]; destArray[++I2] = 0; I2++; if (temp_key[DEFAULT_OFFSET - 5] & 0x01) destArray[I2++] = bcodeArr[BCODE_ARRAY_LENGTH - 6]; destArray[I2++] = key(5); destArray[I2] = bcodeArr[I2 - 6 - I3]; destArray[++I2] = 0; I2++; if (6 < len) { if (temp_key[DEFAULT_OFFSET - 6] & 0x01) destArray[I2++] = bcodeArr[BCODE_ARRAY_LENGTH - 7]; destArray[I2++] = key(6); I1++; } } destArray[I2] = bcodeArr[I2 - I1 - I3]; destArray[++I2] = 0; I2++; } else { I1 = I3 = 1; } // End of unrolling. Now the remaining bytes while(I2 < sum20) { if (I1 < len) { if (temp_key[DEFAULT_OFFSET - I1] & 0x01) destArray[I2++] = bcodeArr[BCODE_ARRAY_LENGTH - I1 - 1]; destArray[I2++] = key(I1); I1++; } if (I3 < cur_salt->l) destArray[I2++] = cur_salt->s[I3++]; destArray[I2] = bcodeArr[I2 - I1 - I3]; destArray[++I2] = 0; I2++; } #if MMX_COEF // This may be unaligned here, but after the aligned vector buffer // transfer, we will have no junk left from loop overrun *(unsigned int*)&destArray[sum20] = 0x00000080; #endif return sum20; } static int crypt_all(int *pcount, struct db_salt *salt) { int count = *pcount; #if MMX_COEF #if defined(_OPENMP) int t; #pragma omp parallel for for (t = 0; t < omp_t; t++) #define ti (t*NBKEYS+index) #else #define t 0 #define ti index #endif { unsigned int index, i; for (index = 0; index < NBKEYS; index++) { int len; if ((len = keyLen[ti]) < 0) { unsigned char *key; // Load key into vector buffer len = 0; key = (unsigned char*)saved_plain[ti]; while (*key) { saved_key[GETPOS(len, ti)] = transtable[*key++]; len++; } // Back-out of trailing spaces while(len && *--key == ' ') { len--; saved_key[GETPOS(len, ti)] = 0; } keyLen[ti] = len; } // Prepend the salt for (i = 0; i < cur_salt->l; i++) saved_key[GETPOS((len + i), ti)] = cur_salt->s[i]; saved_key[GETPOS((len + i), ti)] = 0x80; ((unsigned int *)saved_key)[14*MMX_COEF + (ti&3) + (ti>>2)*16*MMX_COEF] = (len + i) << 3; // Clean rest of buffer for (i = i + len + 1; i <= clean_pos[ti]; i++) saved_key[GETPOS(i, ti)] = 0; clean_pos[ti] = len + cur_salt->l; } DO_MMX_MD5(&saved_key[t*NBKEYS*64], &crypt_key[t*NBKEYS*16]); for (i = 0; i < MD5_SSE_PARA; i++) memset(&interm_key[t*64*NBKEYS+i*64*MMX_COEF+32*MMX_COEF], 0, 32*MMX_COEF); for (index = 0; index < NBKEYS; index++) { unsigned int sum20; unsigned char temp_key[BINARY_SIZE*2]; ARCH_WORD_32 destArray[TEMP_ARRAY_SIZE / 4]; const unsigned int *sw; unsigned int *dw; // Temporary flat copy of crypt sw = (unsigned int*)&crypt_key[GETOUTPOS(0, ti)]; dw = (unsigned int*)temp_key; for (i = 0; i < 4; i++, sw += MMX_COEF) *dw++ = *sw; //now: walld0rf-magic [tm], (c), <g> sum20 = walld0rf_magic(ti, temp_key, (unsigned char*)destArray); // Vectorize a word at a time dw = (unsigned int*)&interm_key[GETPOS(0, ti)]; for (i = 0;i <= sum20; i += 4, dw += MMX_COEF) *dw = destArray[i >> 2]; ((unsigned int *)interm_key)[14*MMX_COEF + (ti&3) + (ti>>2)*16*MMX_COEF] = sum20 << 3; } DO_MMX_MD5(&interm_key[t*NBKEYS*64], 
&crypt_key[t*NBKEYS*16]); for (index = 0; index < NBKEYS; index++) { *(ARCH_WORD_32*)&crypt_key[GETOUTPOS(0, ti)] ^= *(ARCH_WORD_32*)&crypt_key[GETOUTPOS(8, ti)]; *(ARCH_WORD_32*)&crypt_key[GETOUTPOS(4, ti)] ^= *(ARCH_WORD_32*)&crypt_key[GETOUTPOS(12, ti)]; } } #else #ifdef _OPENMP int t; #pragma omp parallel for for (t = 0; t < count; t++) #else #define t 0 #endif { unsigned char temp_key[BINARY_SIZE*2]; unsigned char final_key[BINARY_SIZE*2]; unsigned int i; unsigned int sum20; unsigned char destArray[TEMP_ARRAY_SIZE]; MD5_CTX ctx; if (keyLen[t] < 0) { keyLen[t] = strlen(saved_plain[t]); // Back-out of trailing spaces while ( saved_plain[t][keyLen[t] - 1] == ' ' ) { if (keyLen[t] == 0) break; saved_plain[t][--keyLen[t]] = 0; } for (i = 0; i < keyLen[t]; i++) saved_key[t][i] = transtable[ARCH_INDEX(saved_plain[t][i])]; } MD5_Init(&ctx); MD5_Update(&ctx, saved_key[t], keyLen[t]); MD5_Update(&ctx, cur_salt->s, cur_salt->l); MD5_Final(temp_key,&ctx); //now: walld0rf-magic [tm], (c), <g> sum20 = walld0rf_magic(t, temp_key, destArray); MD5_Init(&ctx); MD5_Update(&ctx, destArray, sum20); MD5_Final(final_key, &ctx); for (i = 0; i < 8; i++) ((char*)crypt_key[t])[i] = final_key[i + 8] ^ final_key[i]; } #endif return count; #undef t #undef ti } static void *binary(char *ciphertext) { static ARCH_WORD_32 binary[BINARY_SIZE / sizeof(ARCH_WORD_32)]; char *realcipher = (char*)binary; int i; char* newCiphertextPointer; newCiphertextPointer = strrchr(ciphertext, '$') + 1; for(i=0;i<BINARY_SIZE;i++) { realcipher[i] = atoi16[ARCH_INDEX(newCiphertextPointer[i*2])]*16 + atoi16[ARCH_INDEX(newCiphertextPointer[i*2+1])]; } return (void *)realcipher; } // Salt is already trimmed and 8-bit converted in split() static void *get_salt(char *ciphertext) { int i; static struct saltstruct out; /* We don't care about trailing garbage, but loader does */ memset(out.s, 0, sizeof(out.s)); out.l = (int)(strrchr(ciphertext, '$') - ciphertext); for (i = 0; i < out.l; ++i) out.s[i] = transtable[ARCH_INDEX(ciphertext[i])]; return &out; } // Here, we remove any salt padding, trim it to 12 bytes // and finally replace any 8-bit character with '^' static char *split(char *ciphertext, int index, struct fmt_main *self) { static char out[CIPHERTEXT_LENGTH + 1]; char *p; int i; p = strrchr(ciphertext, '$'); i = (int)(p - ciphertext) - 1; while (ciphertext[i] == ' ' || i >= SALT_LENGTH) i--; i++; memset(out, 0, sizeof(out)); memcpy(out, ciphertext, i); strnzcpy(&out[i], p, CIPHERTEXT_LENGTH + 1 - i); p = &out[i]; while(--p >= out) if (*p & 0x80) *p = '^'; return out; } #ifdef MMX_COEF #define HASH_OFFSET (index&(MMX_COEF-1))+(index/MMX_COEF)*MMX_COEF*4 static int get_hash_0(int index) { return ((ARCH_WORD_32 *)crypt_key)[HASH_OFFSET] & 0xf; } static int get_hash_1(int index) { return ((ARCH_WORD_32 *)crypt_key)[HASH_OFFSET] & 0xff; } static int get_hash_2(int index) { return ((ARCH_WORD_32 *)crypt_key)[HASH_OFFSET] & 0xfff; } static int get_hash_3(int index) { return ((ARCH_WORD_32 *)crypt_key)[HASH_OFFSET] & 0xffff; } static int get_hash_4(int index) { return ((ARCH_WORD_32 *)crypt_key)[HASH_OFFSET] & 0xfffff; } static int get_hash_5(int index) { return ((ARCH_WORD_32 *)crypt_key)[HASH_OFFSET] & 0xffffff; } static int get_hash_6(int index) { return ((ARCH_WORD_32 *)crypt_key)[HASH_OFFSET] & 0x7ffffff; } #else static int get_hash_0(int index) { return *(ARCH_WORD_32*)crypt_key[index] & 0xF; } static int get_hash_1(int index) { return *(ARCH_WORD_32*)crypt_key[index] & 0xFF; } static int get_hash_2(int index) { return 
*(ARCH_WORD_32*)crypt_key[index] & 0xFFF; } static int get_hash_3(int index) { return *(ARCH_WORD_32*)crypt_key[index] & 0xFFFF; } static int get_hash_4(int index) { return *(ARCH_WORD_32*)crypt_key[index] & 0xFFFFF; } static int get_hash_5(int index) { return *(ARCH_WORD_32*)crypt_key[index] & 0xFFFFFF; } static int get_hash_6(int index) { return *(ARCH_WORD_32*)crypt_key[index] & 0x7FFFFFF; } #endif // Public domain hash function by DJ Bernstein static int salt_hash(void *salt) { struct saltstruct *s = (struct saltstruct*)salt; unsigned int hash = 5381; unsigned int i; for (i = 0; i < s->l; i++) hash = ((hash << 5) + hash) ^ s->s[i]; return hash & (SALT_HASH_SIZE - 1); } struct fmt_main fmt_sapB = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_OMP | FMT_8_BIT, #if FMT_MAIN_VERSION > 11 { NULL }, #endif tests }, { init, fmt_default_done, fmt_default_reset, fmt_default_prepare, valid, split, binary, get_salt, #if FMT_MAIN_VERSION > 11 { NULL }, #endif fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, salt_hash, set_salt, set_key, get_key, fmt_default_clear_keys, crypt_all, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
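/*
 * Usage sketch of the overall SAP B scheme implemented above (scalar path):
 * MD5(key || salt), a walld0rf_magic()-style byte mix, a second MD5, then an
 * XOR fold of the 16-byte digest down to the 8-byte binary. `mix_bytes` is a
 * hypothetical stand-in for walld0rf_magic(); kept inside #if 0 so it does
 * not affect compilation.
 */
#if 0
static void sapb_hash_sketch(const unsigned char *key, unsigned int key_len,
                             const unsigned char *salt, unsigned int salt_len,
                             unsigned char out[8])
{
	unsigned char temp_key[16], final_key[16], dest[TEMP_ARRAY_SIZE];
	unsigned int sum20, i;
	MD5_CTX ctx;

	/* first digest: MD5(key || salt) */
	MD5_Init(&ctx);
	MD5_Update(&ctx, key, key_len);
	MD5_Update(&ctx, salt, salt_len);
	MD5_Final(temp_key, &ctx);

	/* interleave key, salt and bcodeArr bytes, driven by the low bits of
	   temp_key, as walld0rf_magic() does above; sum20 is the length of
	   the mixed buffer (mix_bytes is hypothetical) */
	sum20 = mix_bytes(key, key_len, salt, salt_len, temp_key, dest);

	/* second digest over the mixed buffer */
	MD5_Init(&ctx);
	MD5_Update(&ctx, dest, sum20);
	MD5_Final(final_key, &ctx);

	/* fold 16 bytes down to the 8-byte binary */
	for (i = 0; i < 8; i++)
		out[i] = final_key[i] ^ final_key[i + 8];
}
#endif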
bodysystemcpu_impl.h
/* * Copyright 1993-2014 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ #include "bodysystemcpu.h" #include <assert.h> #include <memory.h> #include <math.h> #include <stdlib.h> #include <stdio.h> #include <helper_cuda.h> #include <algorithm> #include "tipsy.h" #ifdef OPENMP #include <omp.h> #endif template <typename T> BodySystemCPU<T>::BodySystemCPU(int numBodies) : m_numBodies(numBodies), m_bInitialized(false), m_force(0), m_softeningSquared(.00125f), m_damping(0.995f) { m_pos = 0; m_vel = 0; _initialize(numBodies); } template <typename T> BodySystemCPU<T>::~BodySystemCPU() { _finalize(); m_numBodies = 0; } template <typename T> void BodySystemCPU<T>::_initialize(int numBodies) { assert(!m_bInitialized); m_numBodies = numBodies; m_pos = new T[m_numBodies*4]; m_vel = new T[m_numBodies*4]; m_force = new T[m_numBodies*3]; memset(m_pos, 0, m_numBodies*4*sizeof(T)); memset(m_vel, 0, m_numBodies*4*sizeof(T)); memset(m_force, 0, m_numBodies*3*sizeof(T)); m_bInitialized = true; } template <typename T> void BodySystemCPU<T>::_finalize() { assert(m_bInitialized); delete [] m_pos; delete [] m_vel; delete [] m_force; m_bInitialized = false; } template <typename T> void BodySystemCPU<T>::loadTipsyFile(const std::string &filename) { if (m_bInitialized) _finalize(); vector< typename vec4<T>::Type > positions; vector< typename vec4<T>::Type > velocities; vector< int> ids; int nBodies = 0; int nFirst=0, nSecond=0, nThird=0; read_tipsy_file(positions, velocities, ids, filename, nBodies, nFirst, nSecond, nThird); _initialize(nBodies); memcpy(m_pos, &positions[0], sizeof(vec4<T>)*nBodies); memcpy(m_vel, &velocities[0], sizeof(vec4<T>)*nBodies); } template <typename T> void BodySystemCPU<T>::update(T deltaTime) { assert(m_bInitialized); _integrateNBodySystem(deltaTime); //std::swap(m_currentRead, m_currentWrite); } template <typename T> T *BodySystemCPU<T>::getArray(BodyArray array) { assert(m_bInitialized); T *data = 0; switch (array) { default: case BODYSYSTEM_POSITION: data = m_pos; break; case BODYSYSTEM_VELOCITY: data = m_vel; break; } return data; } template <typename T> void BodySystemCPU<T>::setArray(BodyArray array, const T *data) { assert(m_bInitialized); T *target = 0; switch (array) { default: case BODYSYSTEM_POSITION: target = m_pos; break; case BODYSYSTEM_VELOCITY: target = m_vel; break; } memcpy(target, data, m_numBodies*4*sizeof(T)); } template<typename T> T sqrt_T(T x) { return sqrt(x); } template<> float sqrt_T<float>(float x) { return sqrtf(x); } template <typename T> void bodyBodyInteraction(T accel[3], T posMass0[4], T posMass1[4], T softeningSquared) { T r[3]; // r_01 [3 FLOPS] r[0] = posMass1[0] - posMass0[0]; r[1] = posMass1[1] - posMass0[1]; r[2] = posMass1[2] - posMass0[2]; // d^2 + e^2 [6 FLOPS] T distSqr = r[0] * r[0] + r[1] * r[1] + r[2] * r[2]; distSqr += softeningSquared; // invDistCube =1/distSqr^(3/2) [4 FLOPS (2 mul, 1 sqrt, 1 inv)] T invDist = (T)1.0 / (T)sqrt((double)distSqr); T invDistCube = invDist * invDist * invDist; // s = m_j * invDistCube [1 FLOP] T s = posMass1[3] * invDistCube; // (m_1 * r_01) / (d^2 + e^2)^(3/2) [6 FLOPS] accel[0] += r[0] * s; accel[1] += r[1] * s; accel[2] += r[2] * s; } template <typename T> void 
BodySystemCPU<T>::_computeNBodyGravitation()
{
#ifdef OPENMP
    #pragma omp parallel for
#endif

    for (int i = 0; i < m_numBodies; i++)
    {
        int indexForce = 3*i;

        T acc[3] = {0, 0, 0};

        // We unroll this loop 4X for a small performance boost.
        // The unrolled body must not run past the end of the position
        // array when m_numBodies is not a multiple of 4, so the remaining
        // 0-3 interactions are handled by the scalar loop below.
        int j = 0;

        while (j + 3 < m_numBodies)
        {
            bodyBodyInteraction<T>(acc, &m_pos[4*i], &m_pos[4*j], m_softeningSquared); j++;
            bodyBodyInteraction<T>(acc, &m_pos[4*i], &m_pos[4*j], m_softeningSquared); j++;
            bodyBodyInteraction<T>(acc, &m_pos[4*i], &m_pos[4*j], m_softeningSquared); j++;
            bodyBodyInteraction<T>(acc, &m_pos[4*i], &m_pos[4*j], m_softeningSquared); j++;
        }

        while (j < m_numBodies)
        {
            bodyBodyInteraction<T>(acc, &m_pos[4*i], &m_pos[4*j], m_softeningSquared); j++;
        }

        m_force[indexForce  ] = acc[0];
        m_force[indexForce+1] = acc[1];
        m_force[indexForce+2] = acc[2];
    }
}

template <typename T>
void BodySystemCPU<T>::_integrateNBodySystem(T deltaTime)
{
    _computeNBodyGravitation();

#ifdef OPENMP
    #pragma omp parallel for
#endif

    for (int i = 0; i < m_numBodies; ++i)
    {
        int index = 4*i;
        int indexForce = 3*i;

        T pos[3], vel[3], force[3];
        pos[0] = m_pos[index+0];
        pos[1] = m_pos[index+1];
        pos[2] = m_pos[index+2];
        T invMass = m_pos[index+3];

        vel[0] = m_vel[index+0];
        vel[1] = m_vel[index+1];
        vel[2] = m_vel[index+2];

        force[0] = m_force[indexForce+0];
        force[1] = m_force[indexForce+1];
        force[2] = m_force[indexForce+2];

        // acceleration = force / mass;
        // new velocity = old velocity + acceleration * deltaTime
        vel[0] += (force[0] * invMass) * deltaTime;
        vel[1] += (force[1] * invMass) * deltaTime;
        vel[2] += (force[2] * invMass) * deltaTime;

        vel[0] *= m_damping;
        vel[1] *= m_damping;
        vel[2] *= m_damping;

        // new position = old position + velocity * deltaTime
        pos[0] += vel[0] * deltaTime;
        pos[1] += vel[1] * deltaTime;
        pos[2] += vel[2] * deltaTime;

        m_pos[index+0] = pos[0];
        m_pos[index+1] = pos[1];
        m_pos[index+2] = pos[2];

        m_vel[index+0] = vel[0];
        m_vel[index+1] = vel[1];
        m_vel[index+2] = vel[2];
    }
}
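// Usage sketch for the BodySystemCPU<T> class implemented above: set up two
// bodies, step the simulation, and read back positions. The initial state is
// made up for illustration; kept inside #if 0 so it does not affect
// compilation.
#if 0
static void bodySystemCPUDemo()
{
    BodySystemCPU<float> nbody(2);  // two bodies

    // Four floats per body: x, y, z and the mass term in w (see
    // bodyBodyInteraction/_integrateNBodySystem above).
    float pos[8] = { -1.0f, 0.0f, 0.0f, 1.0f,
                      1.0f, 0.0f, 0.0f, 1.0f };
    float vel[8] = {  0.0f,  0.1f, 0.0f, 0.0f,
                      0.0f, -0.1f, 0.0f, 0.0f };

    nbody.setArray(BODYSYSTEM_POSITION, pos);
    nbody.setArray(BODYSYSTEM_VELOCITY, vel);

    for (int step = 0; step < 100; ++step)
        nbody.update(0.016f);  // advance by one ~16 ms timestep

    float *p = nbody.getArray(BODYSYSTEM_POSITION);
    printf("body 0 is now at (%f, %f, %f)\n", p[0], p[1], p[2]);
}
#endif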
tinyexr.h
/* Copyright (c) 2014 - 2019, Syoyo Fujita and many contributors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the Syoyo Fujita nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ // TinyEXR contains some OpenEXR code, which is licensed under ------------ /////////////////////////////////////////////////////////////////////////// // // Copyright (c) 2002, Industrial Light & Magic, a division of Lucas // Digital Ltd. LLC // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Industrial Light & Magic nor the names of // its contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
// ///////////////////////////////////////////////////////////////////////////
// End of OpenEXR license -------------------------------------------------

#ifndef TINYEXR_H_
#define TINYEXR_H_

//
//
// Do this:
//    #define TINYEXR_IMPLEMENTATION
// before you include this file in *one* C or C++ file to create the
// implementation.
//
//
// i.e. it should look like this:
//    #include ...
//    #include ...
//    #include ...
//    #define TINYEXR_IMPLEMENTATION
//    #include "tinyexr.h"
//
//

#include <stddef.h>  // for size_t
#include <stdint.h>  // assume stdint.h is available (C99)

#ifdef __cplusplus
extern "C" {
#endif

// Whether to use the embedded miniz to decode ZIP-compressed pixel data.
// Linking with zlib is required if this flag is 0.
#ifndef TINYEXR_USE_MINIZ
#define TINYEXR_USE_MINIZ (1)
#endif

// Disable PIZ compression when applying cpplint.
#ifndef TINYEXR_USE_PIZ
#define TINYEXR_USE_PIZ (1)
#endif

#ifndef TINYEXR_USE_ZFP
#define TINYEXR_USE_ZFP (0)  // TinyEXR extension.
// http://computation.llnl.gov/projects/floating-point-compression
#endif

#ifndef TINYEXR_USE_THREAD
#define TINYEXR_USE_THREAD (0)  // No threaded loading.
#endif

#ifndef TINYEXR_USE_OPENMP
#ifdef _OPENMP
#define TINYEXR_USE_OPENMP (1)
#else
#define TINYEXR_USE_OPENMP (0)
#endif
#endif

#define TINYEXR_SUCCESS (0)
#define TINYEXR_ERROR_INVALID_MAGIC_NUMBER (-1)
#define TINYEXR_ERROR_INVALID_EXR_VERSION (-2)
#define TINYEXR_ERROR_INVALID_ARGUMENT (-3)
#define TINYEXR_ERROR_INVALID_DATA (-4)
#define TINYEXR_ERROR_INVALID_FILE (-5)
#define TINYEXR_ERROR_INVALID_PARAMETER (-6)
#define TINYEXR_ERROR_CANT_OPEN_FILE (-7)
#define TINYEXR_ERROR_UNSUPPORTED_FORMAT (-8)
#define TINYEXR_ERROR_INVALID_HEADER (-9)
#define TINYEXR_ERROR_UNSUPPORTED_FEATURE (-10)
#define TINYEXR_ERROR_CANT_WRITE_FILE (-11)
#define TINYEXR_ERROR_SERIALZATION_FAILED (-12)
#define TINYEXR_ERROR_LAYER_NOT_FOUND (-13)

// @note { OpenEXR file format: http://www.openexr.com/openexrfilelayout.pdf }

// pixel type: possible values are: UINT = 0 HALF = 1 FLOAT = 2
#define TINYEXR_PIXELTYPE_UINT (0)
#define TINYEXR_PIXELTYPE_HALF (1)
#define TINYEXR_PIXELTYPE_FLOAT (2)

#define TINYEXR_MAX_HEADER_ATTRIBUTES (1024)
#define TINYEXR_MAX_CUSTOM_ATTRIBUTES (128)

#define TINYEXR_COMPRESSIONTYPE_NONE (0)
#define TINYEXR_COMPRESSIONTYPE_RLE (1)
#define TINYEXR_COMPRESSIONTYPE_ZIPS (2)
#define TINYEXR_COMPRESSIONTYPE_ZIP (3)
#define TINYEXR_COMPRESSIONTYPE_PIZ (4)
#define TINYEXR_COMPRESSIONTYPE_ZFP (128)  // TinyEXR extension

#define TINYEXR_ZFP_COMPRESSIONTYPE_RATE (0)
#define TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION (1)
#define TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY (2)

#define TINYEXR_TILE_ONE_LEVEL (0)
#define TINYEXR_TILE_MIPMAP_LEVELS (1)
#define TINYEXR_TILE_RIPMAP_LEVELS (2)

#define TINYEXR_TILE_ROUND_DOWN (0)
#define TINYEXR_TILE_ROUND_UP (1)

typedef struct _EXRVersion {
  int version;    // this must be 2
  int tiled;      // tile format image
  int long_name;  // long name attribute
  int non_image;  // deep image(EXR 2.0)
  int multipart;  // multi-part(EXR 2.0)
} EXRVersion;
typedef struct _EXRAttribute {
  char name[256];  // name and type are up to 255 chars long.
  char type[256];
  unsigned char *value;  // uint8_t*
  int size;
  int pad0;
} EXRAttribute;

typedef struct _EXRChannelInfo {
  char name[256];  // less than 255 bytes long
  int pixel_type;
  int x_sampling;
  int y_sampling;
  unsigned char p_linear;
  unsigned char pad[3];
} EXRChannelInfo;

typedef struct _EXRTile {
  int offset_x;
  int offset_y;
  int level_x;
  int level_y;

  int width;   // actual width in a tile.
  int height;  // actual height in a tile.

  unsigned char **images;  // image[channels][pixels]
} EXRTile;

typedef struct _EXRHeader {
  float pixel_aspect_ratio;
  int line_order;
  int data_window[4];
  int display_window[4];
  float screen_window_center[2];
  float screen_window_width;

  int chunk_count;

  // Properties for tiled format (`tiledesc`).
  int tiled;
  int tile_size_x;
  int tile_size_y;
  int tile_level_mode;
  int tile_rounding_mode;

  int long_name;
  int non_image;
  int multipart;
  unsigned int header_len;

  // Custom attributes (excludes required attributes, e.g. `channels`,
  // `compression`, etc.)
  int num_custom_attributes;
  EXRAttribute *custom_attributes;  // array of EXRAttribute. size =
                                    // `num_custom_attributes`.

  EXRChannelInfo *channels;  // [num_channels]

  int *pixel_types;  // Loaded pixel type (TINYEXR_PIXELTYPE_*) of `images` for
                     // each channel. This is overwritten with
                     // `requested_pixel_types` when loading.
  int num_channels;

  int compression_type;        // compression type (TINYEXR_COMPRESSIONTYPE_*)
  int *requested_pixel_types;  // Filled initially by
                               // ParseEXRHeaderFrom(Memory|File), then users
                               // can edit it (only valid for HALF pixel type
                               // channel)
} EXRHeader;

typedef struct _EXRMultiPartHeader {
  int num_headers;
  EXRHeader *headers;
} EXRMultiPartHeader;

typedef struct _EXRImage {
  EXRTile *tiles;  // Tiled pixel data. The application must reconstruct image
                   // from tiles manually. NULL if scanline format.
  unsigned char **images;  // image[channels][pixels]. NULL if tiled format.

  int width;
  int height;
  int num_channels;

  // Properties for tile format.
  int num_tiles;
} EXRImage;

typedef struct _EXRMultiPartImage {
  int num_images;
  EXRImage *images;
} EXRMultiPartImage;

typedef struct _DeepImage {
  const char **channel_names;
  float ***image;      // image[channels][scanlines][samples]
  int **offset_table;  // offset_table[scanline][offsets]
  int num_channels;
  int width;
  int height;
  int pad0;
} DeepImage;

// @deprecated { For backward compatibility. Not recommended to use. }
// Loads single-frame OpenEXR image. Assume EXR image contains A(single channel
// alpha) or RGB(A) channels.
// Application must free image data as returned by `out_rgba`.
// Result image format is: float x RGBA x width x height
// Returns negative value and may set error string in `err` when there's an
// error
extern int LoadEXR(float **out_rgba, int *width, int *height,
                   const char *filename, const char **err);

// Loads single-frame OpenEXR image by specifying layer name. Assume EXR image
// contains A(single channel alpha) or RGB(A) channels.
// Application must free image data as returned by `out_rgba`.
// Result image format is: float x RGBA x width x height
// Returns negative value and may set error string in `err` when there's an
// error
// When the specified layer name is not found in the EXR file, the function
// will return `TINYEXR_ERROR_LAYER_NOT_FOUND`.
extern int LoadEXRWithLayer(float **out_rgba, int *width, int *height,
                            const char *filename, const char *layer_name,
                            const char **err);
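// Usage sketch for the convenience API above: load "input.exr" (an assumed
// input file) into a width x height RGBA float buffer. Illustrative only,
// kept inside `#if 0` so it does not affect compilation; requires <stdio.h>
// and <stdlib.h>.
#if 0
static int load_exr_rgba_sketch(void) {
  float *rgba = NULL;
  int width = 0, height = 0;
  const char *err = NULL;

  int ret = LoadEXR(&rgba, &width, &height, "input.exr", &err);
  if (ret != TINYEXR_SUCCESS) {
    if (err) {
      fprintf(stderr, "EXR load error: %s\n", err);
      FreeEXRErrorMessage(err);  // release the error string (declared below)
    }
    return ret;
  }

  // `rgba` now holds width * height RGBA pixels (4 floats each).
  free(rgba);
  return TINYEXR_SUCCESS;
}
#endif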
//
// Get layer infos from EXR file.
//
// @param[out] layer_names List of layer names. Application must free memory
// after using this.
// @param[out] num_layers The number of layers
// @param[out] err Error string (will be filled when the function returns an
// error code). Free it using FreeEXRErrorMessage after using this value.
//
// @return TINYEXR_SUCCESS upon success.
//
extern int EXRLayers(const char *filename, const char **layer_names[],
                     int *num_layers, const char **err);

// @deprecated { to be removed. }
// Simple wrapper API for ParseEXRHeaderFromFile.
// Checks whether the given file is an EXR file (by just looking at the
// header).
// @return TINYEXR_SUCCESS for an EXR image, TINYEXR_ERROR_INVALID_HEADER for
// others
extern int IsEXR(const char *filename);

// @deprecated { to be removed. }
// Saves single-frame OpenEXR image. Assume EXR image contains RGB(A) channels.
// components must be 1 (Grayscale), 3 (RGB) or 4 (RGBA).
// Input image format is: `float x width x height`, or `float x RGB(A) x
// width x height`
// Saves image as fp16(HALF) format when `save_as_fp16` is a positive,
// non-zero value.
// Saves image as fp32(FLOAT) format when `save_as_fp16` is 0.
// Uses ZIP compression by default.
// Returns negative value and may set error string in `err` when there's an
// error
extern int SaveEXR(const float *data, const int width, const int height,
                   const int components, const int save_as_fp16,
                   const char *filename, const char **err);

// Initialize EXRHeader struct
extern void InitEXRHeader(EXRHeader *exr_header);

// Initialize EXRImage struct
extern void InitEXRImage(EXRImage *exr_image);

// Frees internal data of EXRHeader struct
extern int FreeEXRHeader(EXRHeader *exr_header);

// Frees internal data of EXRImage struct
extern int FreeEXRImage(EXRImage *exr_image);

// Frees error message
extern void FreeEXRErrorMessage(const char *msg);

// Parse EXR version header of a file.
extern int ParseEXRVersionFromFile(EXRVersion *version, const char *filename);

// Parse EXR version header from memory-mapped EXR data.
extern int ParseEXRVersionFromMemory(EXRVersion *version,
                                     const unsigned char *memory, size_t size);

// Parse single-part OpenEXR header from a file and initialize `EXRHeader`.
// When there is an error message, the application must free `err` with
// FreeEXRErrorMessage().
extern int ParseEXRHeaderFromFile(EXRHeader *header, const EXRVersion *version,
                                  const char *filename, const char **err);

// Parse single-part OpenEXR header from memory and initialize `EXRHeader`.
// When there is an error message, the application must free `err` with
// FreeEXRErrorMessage().
extern int ParseEXRHeaderFromMemory(EXRHeader *header,
                                    const EXRVersion *version,
                                    const unsigned char *memory, size_t size,
                                    const char **err);

// Parse multi-part OpenEXR headers from a file and initialize `EXRHeader*`
// array.
// When there is an error message, the application must free `err` with
// FreeEXRErrorMessage().
extern int ParseEXRMultipartHeaderFromFile(EXRHeader ***headers,
                                           int *num_headers,
                                           const EXRVersion *version,
                                           const char *filename,
                                           const char **err);

// Parse multi-part OpenEXR headers from memory and initialize `EXRHeader*`
// array.
// When there is an error message, the application must free `err` with
// FreeEXRErrorMessage().
extern int ParseEXRMultipartHeaderFromMemory(EXRHeader ***headers,
                                             int *num_headers,
                                             const EXRVersion *version,
                                             const unsigned char *memory,
                                             size_t size, const char **err);
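// Usage sketch for the two-phase loading flow: parse the version and header
// with the functions above, then load pixels with LoadEXRImageFromFile()
// declared just below. "input.exr" is an assumed scanline file; illustrative
// only, kept inside `#if 0` so it does not affect compilation.
#if 0
static int load_exr_two_phase_sketch(void) {
  EXRVersion version;
  EXRHeader header;
  EXRImage image;
  const char *err = NULL;
  int i, ret;

  ret = ParseEXRVersionFromFile(&version, "input.exr");
  if (ret != TINYEXR_SUCCESS) return ret;

  InitEXRHeader(&header);
  ret = ParseEXRHeaderFromFile(&header, &version, "input.exr", &err);
  if (ret != TINYEXR_SUCCESS) {
    if (err) FreeEXRErrorMessage(err);
    return ret;
  }

  // Ask the loader to widen HALF channels to FLOAT (see
  // `requested_pixel_types` in EXRHeader above).
  for (i = 0; i < header.num_channels; i++) {
    if (header.pixel_types[i] == TINYEXR_PIXELTYPE_HALF)
      header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT;
  }

  InitEXRImage(&image);
  ret = LoadEXRImageFromFile(&image, &header, "input.exr", &err);
  if (ret == TINYEXR_SUCCESS) {
    // image.images[c] holds width * height samples for channel c.
    FreeEXRImage(&image);
  } else if (err) {
    FreeEXRErrorMessage(err);
  }
  FreeEXRHeader(&header);
  return ret;
}
#endif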
// Loads single-part OpenEXR image from a file.
// Application must setup `ParseEXRHeaderFromFile` before calling this
// function.
// Application can free EXRImage using `FreeEXRImage`.
// Returns negative value and may set error string in `err` when there's an
// error.
// When there is an error message, the application must free `err` with
// FreeEXRErrorMessage().
extern int LoadEXRImageFromFile(EXRImage *image, const EXRHeader *header,
                                const char *filename, const char **err);

// Loads single-part OpenEXR image from memory.
// Application must setup `EXRHeader` with `ParseEXRHeaderFromMemory` before
// calling this function.
// Application can free EXRImage using `FreeEXRImage`.
// Returns negative value and may set error string in `err` when there's an
// error.
// When there is an error message, the application must free `err` with
// FreeEXRErrorMessage().
extern int LoadEXRImageFromMemory(EXRImage *image, const EXRHeader *header,
                                  const unsigned char *memory,
                                  const size_t size, const char **err);

// Loads multi-part OpenEXR image from a file.
// Application must setup `ParseEXRMultipartHeaderFromFile` before calling
// this function.
// Application can free EXRImage using `FreeEXRImage`.
// Returns negative value and may set error string in `err` when there's an
// error.
// When there is an error message, the application must free `err` with
// FreeEXRErrorMessage().
extern int LoadEXRMultipartImageFromFile(EXRImage *images,
                                         const EXRHeader **headers,
                                         unsigned int num_parts,
                                         const char *filename,
                                         const char **err);

// Loads multi-part OpenEXR image from memory.
// Application must setup `EXRHeader*` array with
// `ParseEXRMultipartHeaderFromMemory` before calling this function.
// Application can free EXRImage using `FreeEXRImage`.
// Returns negative value and may set error string in `err` when there's an
// error.
// When there is an error message, the application must free `err` with
// FreeEXRErrorMessage().
extern int LoadEXRMultipartImageFromMemory(EXRImage *images,
                                           const EXRHeader **headers,
                                           unsigned int num_parts,
                                           const unsigned char *memory,
                                           const size_t size,
                                           const char **err);

// Saves multi-channel, single-frame OpenEXR image to a file.
// Returns negative value and may set error string in `err` when there's an
// error.
// When there is an error message, the application must free `err` with
// FreeEXRErrorMessage().
extern int SaveEXRImageToFile(const EXRImage *image,
                              const EXRHeader *exr_header,
                              const char *filename, const char **err);

// Saves multi-channel, single-frame OpenEXR image to memory.
// Image is compressed using EXRImage.compression value.
// Returns the number of bytes on success.
// Returns zero and sets the error string in `err` when there's an error.
// When there is an error message, the application must free `err` with
// FreeEXRErrorMessage().
extern size_t SaveEXRImageToMemory(const EXRImage *image,
                                   const EXRHeader *exr_header,
                                   unsigned char **memory, const char **err);

// Loads single-frame OpenEXR deep image.
// Application must free memory of variables in DeepImage (image,
// offset_table).
// Returns negative value and may set error string in `err` when there's an
// error.
// When there is an error message, the application must free `err` with
// FreeEXRErrorMessage().
extern int LoadDeepEXR(DeepImage *out_image, const char *filename,
                       const char **err);

// NOT YET IMPLEMENTED:
// Saves single-frame OpenEXR deep image.
// Returns negative value and may set error string in `err` when there's an
// error.
// extern int SaveDeepEXR(const DeepImage *in_image, const char *filename,
//                        const char **err);

// NOT YET IMPLEMENTED:
// Loads multi-part OpenEXR deep image.
// Application must free memory of variables in DeepImage(image, offset_table) // extern int LoadMultiPartDeepEXR(DeepImage **out_image, int num_parts, const // char *filename, // const char **err); // For emscripten. // Loads single-frame OpenEXR image from memory. Assume EXR image contains // RGB(A) channels. // Returns negative value and may set error string in `err` when there's an // error // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern int LoadEXRFromMemory(float **out_rgba, int *width, int *height, const unsigned char *memory, size_t size, const char **err); #ifdef __cplusplus } #endif #endif // TINYEXR_H_ #ifdef TINYEXR_IMPLEMENTATION #ifndef TINYEXR_IMPLEMENTATION_DEIFNED #define TINYEXR_IMPLEMENTATION_DEIFNED #include <algorithm> #include <cassert> #include <cstdio> #include <cstdlib> #include <cstring> #include <sstream> // #include <iostream> // debug #include <limits> #include <string> #include <vector> #if __cplusplus > 199711L // C++11 #include <cstdint> #if TINYEXR_USE_THREAD #include <atomic> #include <thread> #endif #endif // __cplusplus > 199711L #if TINYEXR_USE_OPENMP #include <omp.h> #endif #if TINYEXR_USE_MINIZ #else // Issue #46. Please include your own zlib-compatible API header before // including `tinyexr.h` //#include "zlib.h" #endif #if TINYEXR_USE_ZFP #include "zfp.h" #endif namespace tinyexr { #if __cplusplus > 199711L // C++11 typedef uint64_t tinyexr_uint64; typedef int64_t tinyexr_int64; #else // Although `long long` is not a standard type pre C++11, assume it is defined // as a compiler's extension. #ifdef __clang__ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wc++11-long-long" #endif typedef unsigned long long tinyexr_uint64; typedef long long tinyexr_int64; #ifdef __clang__ #pragma clang diagnostic pop #endif #endif #if TINYEXR_USE_MINIZ namespace miniz { #ifdef __clang__ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wc++11-long-long" #pragma clang diagnostic ignored "-Wold-style-cast" #pragma clang diagnostic ignored "-Wpadded" #pragma clang diagnostic ignored "-Wsign-conversion" #pragma clang diagnostic ignored "-Wc++11-extensions" #pragma clang diagnostic ignored "-Wconversion" #pragma clang diagnostic ignored "-Wunused-function" #pragma clang diagnostic ignored "-Wc++98-compat-pedantic" #pragma clang diagnostic ignored "-Wundef" #if __has_warning("-Wcomma") #pragma clang diagnostic ignored "-Wcomma" #endif #if __has_warning("-Wmacro-redefined") #pragma clang diagnostic ignored "-Wmacro-redefined" #endif #if __has_warning("-Wcast-qual") #pragma clang diagnostic ignored "-Wcast-qual" #endif #if __has_warning("-Wzero-as-null-pointer-constant") #pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant" #endif #if __has_warning("-Wtautological-constant-compare") #pragma clang diagnostic ignored "-Wtautological-constant-compare" #endif #if __has_warning("-Wextra-semi-stmt") #pragma clang diagnostic ignored "-Wextra-semi-stmt" #endif #endif /* miniz.c v1.15 - public domain deflate/inflate, zlib-subset, ZIP reading/writing/appending, PNG writing See "unlicense" statement at the end of this file. Rich Geldreich <richgel99@gmail.com>, last updated Oct. 13, 2013 Implements RFC 1950: http://www.ietf.org/rfc/rfc1950.txt and RFC 1951: http://www.ietf.org/rfc/rfc1951.txt Most API's defined in miniz.c are optional. 
For example, to disable the archive related functions just define MINIZ_NO_ARCHIVE_APIS, or to get rid of all stdio usage define MINIZ_NO_STDIO (see the list below for more macros). * Change History 10/13/13 v1.15 r4 - Interim bugfix release while I work on the next major release with Zip64 support (almost there!): - Critical fix for the MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY bug (thanks kahmyong.moon@hp.com) which could cause locate files to not find files. This bug would only have occured in earlier versions if you explicitly used this flag, OR if you used mz_zip_extract_archive_file_to_heap() or mz_zip_add_mem_to_archive_file_in_place() (which used this flag). If you can't switch to v1.15 but want to fix this bug, just remove the uses of this flag from both helper funcs (and of course don't use the flag). - Bugfix in mz_zip_reader_extract_to_mem_no_alloc() from kymoon when pUser_read_buf is not NULL and compressed size is > uncompressed size - Fixing mz_zip_reader_extract_*() funcs so they don't try to extract compressed data from directory entries, to account for weird zipfiles which contain zero-size compressed data on dir entries. Hopefully this fix won't cause any issues on weird zip archives, because it assumes the low 16-bits of zip external attributes are DOS attributes (which I believe they always are in practice). - Fixing mz_zip_reader_is_file_a_directory() so it doesn't check the internal attributes, just the filename and external attributes - mz_zip_reader_init_file() - missing MZ_FCLOSE() call if the seek failed - Added cmake support for Linux builds which builds all the examples, tested with clang v3.3 and gcc v4.6. - Clang fix for tdefl_write_image_to_png_file_in_memory() from toffaletti - Merged MZ_FORCEINLINE fix from hdeanclark - Fix <time.h> include before config #ifdef, thanks emil.brink - Added tdefl_write_image_to_png_file_in_memory_ex(): supports Y flipping (super useful for OpenGL apps), and explicit control over the compression level (so you can set it to 1 for real-time compression). - Merged in some compiler fixes from paulharris's github repro. - Retested this build under Windows (VS 2010, including static analysis), tcc 0.9.26, gcc v4.6 and clang v3.3. - Added example6.c, which dumps an image of the mandelbrot set to a PNG file. - Modified example2 to help test the MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY flag more. - In r3: Bugfix to mz_zip_writer_add_file() found during merge: Fix possible src file fclose() leak if alignment bytes+local header file write faiiled - In r4: Minor bugfix to mz_zip_writer_add_from_zip_reader(): Was pushing the wrong central dir header offset, appears harmless in this release, but it became a problem in the zip64 branch 5/20/12 v1.14 - MinGW32/64 GCC 4.6.1 compiler fixes: added MZ_FORCEINLINE, #include <time.h> (thanks fermtect). 5/19/12 v1.13 - From jason@cornsyrup.org and kelwert@mtu.edu - Fix mz_crc32() so it doesn't compute the wrong CRC-32's when mz_ulong is 64-bit. - Temporarily/locally slammed in "typedef unsigned long mz_ulong" and re-ran a randomized regression test on ~500k files. - Eliminated a bunch of warnings when compiling with GCC 32-bit/64. - Ran all examples, miniz.c, and tinfl.c through MSVC 2008's /analyze (static analysis) option and fixed all warnings (except for the silly "Use of the comma-operator in a tested expression.." analysis warning, which I purposely use to work around a MSVC compiler warning). - Created 32-bit and 64-bit Codeblocks projects/workspace. Built and tested Linux executables. 
The codeblocks workspace is compatible with Linux+Win32/x64. - Added miniz_tester solution/project, which is a useful little app derived from LZHAM's tester app that I use as part of the regression test. - Ran miniz.c and tinfl.c through another series of regression testing on ~500,000 files and archives. - Modified example5.c so it purposely disables a bunch of high-level functionality (MINIZ_NO_STDIO, etc.). (Thanks to corysama for the MINIZ_NO_STDIO bug report.) - Fix ftell() usage in examples so they exit with an error on files which are too large (a limitation of the examples, not miniz itself). 4/12/12 v1.12 - More comments, added low-level example5.c, fixed a couple minor level_and_flags issues in the archive API's. level_and_flags can now be set to MZ_DEFAULT_COMPRESSION. Thanks to Bruce Dawson <bruced@valvesoftware.com> for the feedback/bug report. 5/28/11 v1.11 - Added statement from unlicense.org 5/27/11 v1.10 - Substantial compressor optimizations: - Level 1 is now ~4x faster than before. The L1 compressor's throughput now varies between 70-110MB/sec. on a - Core i7 (actual throughput varies depending on the type of data, and x64 vs. x86). - Improved baseline L2-L9 compression perf. Also, greatly improved compression perf. issues on some file types. - Refactored the compression code for better readability and maintainability. - Added level 10 compression level (L10 has slightly better ratio than level 9, but could have a potentially large drop in throughput on some files). 5/15/11 v1.09 - Initial stable release. * Low-level Deflate/Inflate implementation notes: Compression: Use the "tdefl" API's. The compressor supports raw, static, and dynamic blocks, lazy or greedy parsing, match length filtering, RLE-only, and Huffman-only streams. It performs and compresses approximately as well as zlib. Decompression: Use the "tinfl" API's. The entire decompressor is implemented as a single function coroutine: see tinfl_decompress(). It supports decompression into a 32KB (or larger power of 2) wrapping buffer, or into a memory block large enough to hold the entire file. The low-level tdefl/tinfl API's do not make any use of dynamic memory allocation. * zlib-style API notes: miniz.c implements a fairly large subset of zlib. There's enough functionality present for it to be a drop-in zlib replacement in many apps: The z_stream struct, optional memory allocation callbacks deflateInit/deflateInit2/deflate/deflateReset/deflateEnd/deflateBound inflateInit/inflateInit2/inflate/inflateEnd compress, compress2, compressBound, uncompress CRC-32, Adler-32 - Using modern, minimal code size, CPU cache friendly routines. Supports raw deflate streams or standard zlib streams with adler-32 checking. Limitations: The callback API's are not implemented yet. No support for gzip headers or zlib static dictionaries. I've tried to closely emulate zlib's various flavors of stream flushing and return status codes, but there are no guarantees that miniz.c pulls this off perfectly. * PNG writing: See the tdefl_write_image_to_png_file_in_memory() function, originally written by Alex Evans. Supports 1-4 bytes/pixel images. * ZIP archive API notes: The ZIP archive API's where designed with simplicity and efficiency in mind, with just enough abstraction to get the job done with minimal fuss. There are simple API's to retrieve file information, read files from existing archives, create new archives, append new files to existing archives, or clone archive data from one archive to another. 
It supports archives located in memory or the heap, on disk (using stdio.h), or you can specify custom file read/write callbacks. - Archive reading: Just call this function to read a single file from a disk archive: void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename, const char *pArchive_name, size_t *pSize, mz_uint zip_flags); For more complex cases, use the "mz_zip_reader" functions. Upon opening an archive, the entire central directory is located and read as-is into memory, and subsequent file access only occurs when reading individual files. - Archives file scanning: The simple way is to use this function to scan a loaded archive for a specific file: int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName, const char *pComment, mz_uint flags); The locate operation can optionally check file comments too, which (as one example) can be used to identify multiple versions of the same file in an archive. This function uses a simple linear search through the central directory, so it's not very fast. Alternately, you can iterate through all the files in an archive (using mz_zip_reader_get_num_files()) and retrieve detailed info on each file by calling mz_zip_reader_file_stat(). - Archive creation: Use the "mz_zip_writer" functions. The ZIP writer immediately writes compressed file data to disk and builds an exact image of the central directory in memory. The central directory image is written all at once at the end of the archive file when the archive is finalized. The archive writer can optionally align each file's local header and file data to any power of 2 alignment, which can be useful when the archive will be read from optical media. Also, the writer supports placing arbitrary data blobs at the very beginning of ZIP archives. Archives written using either feature are still readable by any ZIP tool. - Archive appending: The simple way to add a single file to an archive is to call this function: mz_bool mz_zip_add_mem_to_archive_file_in_place(const char *pZip_filename, const char *pArchive_name, const void *pBuf, size_t buf_size, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags); The archive will be created if it doesn't already exist, otherwise it'll be appended to. Note the appending is done in-place and is not an atomic operation, so if something goes wrong during the operation it's possible the archive could be left without a central directory (although the local file headers and file data will be fine, so the archive will be recoverable). For more complex archive modification scenarios: 1. The safest way is to use a mz_zip_reader to read the existing archive, cloning only those bits you want to preserve into a new archive using using the mz_zip_writer_add_from_zip_reader() function (which compiles the compressed file data as-is). When you're done, delete the old archive and rename the newly written archive, and you're done. This is safe but requires a bunch of temporary disk space or heap memory. 2. Or, you can convert an mz_zip_reader in-place to an mz_zip_writer using mz_zip_writer_init_from_reader(), append new files as needed, then finalize the archive which will write an updated central directory to the original archive. (This is basically what mz_zip_add_mem_to_archive_file_in_place() does.) There's a possibility that the archive's central directory could be lost with this method if anything goes wrong, though. - ZIP archive support limitations: No zip64 or spanning support. 
Extraction functions can only handle unencrypted, stored or deflated files. Requires streams capable of seeking. * This is a header file library, like stb_image.c. To get only a header file, either cut and paste the below header, or create miniz.h, #define MINIZ_HEADER_FILE_ONLY, and then include miniz.c from it. * Important: For best perf. be sure to customize the below macros for your target platform: #define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 1 #define MINIZ_LITTLE_ENDIAN 1 #define MINIZ_HAS_64BIT_REGISTERS 1 * On platforms using glibc, Be sure to "#define _LARGEFILE64_SOURCE 1" before including miniz.c to ensure miniz uses the 64-bit variants: fopen64(), stat64(), etc. Otherwise you won't be able to process large files (i.e. 32-bit stat() fails for me on files > 0x7FFFFFFF bytes). */ #ifndef MINIZ_HEADER_INCLUDED #define MINIZ_HEADER_INCLUDED //#include <stdlib.h> // Defines to completely disable specific portions of miniz.c: // If all macros here are defined the only functionality remaining will be // CRC-32, adler-32, tinfl, and tdefl. // Define MINIZ_NO_STDIO to disable all usage and any functions which rely on // stdio for file I/O. //#define MINIZ_NO_STDIO // If MINIZ_NO_TIME is specified then the ZIP archive functions will not be able // to get the current time, or // get/set file times, and the C run-time funcs that get/set times won't be // called. // The current downside is the times written to your archives will be from 1979. #define MINIZ_NO_TIME // Define MINIZ_NO_ARCHIVE_APIS to disable all ZIP archive API's. #define MINIZ_NO_ARCHIVE_APIS // Define MINIZ_NO_ARCHIVE_APIS to disable all writing related ZIP archive // API's. //#define MINIZ_NO_ARCHIVE_WRITING_APIS // Define MINIZ_NO_ZLIB_APIS to remove all ZLIB-style compression/decompression // API's. //#define MINIZ_NO_ZLIB_APIS // Define MINIZ_NO_ZLIB_COMPATIBLE_NAME to disable zlib names, to prevent // conflicts against stock zlib. //#define MINIZ_NO_ZLIB_COMPATIBLE_NAMES // Define MINIZ_NO_MALLOC to disable all calls to malloc, free, and realloc. // Note if MINIZ_NO_MALLOC is defined then the user must always provide custom // user alloc/free/realloc // callbacks to the zlib and archive API's, and a few stand-alone helper API's // which don't provide custom user // functions (such as tdefl_compress_mem_to_heap() and // tinfl_decompress_mem_to_heap()) won't work. //#define MINIZ_NO_MALLOC #if defined(__TINYC__) && (defined(__linux) || defined(__linux__)) // TODO: Work around "error: include file 'sys\utime.h' when compiling with tcc // on Linux #define MINIZ_NO_TIME #endif #if !defined(MINIZ_NO_TIME) && !defined(MINIZ_NO_ARCHIVE_APIS) //#include <time.h> #endif #if defined(_M_IX86) || defined(_M_X64) || defined(__i386__) || \ defined(__i386) || defined(__i486__) || defined(__i486) || \ defined(i386) || defined(__ia64__) || defined(__x86_64__) // MINIZ_X86_OR_X64_CPU is only used to help set the below macros. #define MINIZ_X86_OR_X64_CPU 1 #endif #if defined(__sparcv9) // Big endian #else #if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || MINIZ_X86_OR_X64_CPU // Set MINIZ_LITTLE_ENDIAN to 1 if the processor is little endian. #define MINIZ_LITTLE_ENDIAN 1 #endif #endif #if MINIZ_X86_OR_X64_CPU // Set MINIZ_USE_UNALIGNED_LOADS_AND_STORES to 1 on CPU's that permit efficient // integer loads and stores from unaligned addresses. 
//#define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 1 #define MINIZ_USE_UNALIGNED_LOADS_AND_STORES \ 0 // disable to suppress compiler warnings #endif #if defined(_M_X64) || defined(_WIN64) || defined(__MINGW64__) || \ defined(_LP64) || defined(__LP64__) || defined(__ia64__) || \ defined(__x86_64__) // Set MINIZ_HAS_64BIT_REGISTERS to 1 if operations on 64-bit integers are // reasonably fast (and don't involve compiler generated calls to helper // functions). #define MINIZ_HAS_64BIT_REGISTERS 1 #endif #ifdef __cplusplus extern "C" { #endif // ------------------- zlib-style API Definitions. // For more compatibility with zlib, miniz.c uses unsigned long for some // parameters/struct members. Beware: mz_ulong can be either 32 or 64-bits! typedef unsigned long mz_ulong; // mz_free() internally uses the MZ_FREE() macro (which by default calls free() // unless you've modified the MZ_MALLOC macro) to release a block allocated from // the heap. void mz_free(void *p); #define MZ_ADLER32_INIT (1) // mz_adler32() returns the initial adler-32 value to use when called with // ptr==NULL. mz_ulong mz_adler32(mz_ulong adler, const unsigned char *ptr, size_t buf_len); #define MZ_CRC32_INIT (0) // mz_crc32() returns the initial CRC-32 value to use when called with // ptr==NULL. mz_ulong mz_crc32(mz_ulong crc, const unsigned char *ptr, size_t buf_len); // Compression strategies. enum { MZ_DEFAULT_STRATEGY = 0, MZ_FILTERED = 1, MZ_HUFFMAN_ONLY = 2, MZ_RLE = 3, MZ_FIXED = 4 }; // Method #define MZ_DEFLATED 8 #ifndef MINIZ_NO_ZLIB_APIS // Heap allocation callbacks. // Note that mz_alloc_func parameter types purpsosely differ from zlib's: // items/size is size_t, not unsigned long. typedef void *(*mz_alloc_func)(void *opaque, size_t items, size_t size); typedef void (*mz_free_func)(void *opaque, void *address); typedef void *(*mz_realloc_func)(void *opaque, void *address, size_t items, size_t size); #define MZ_VERSION "9.1.15" #define MZ_VERNUM 0x91F0 #define MZ_VER_MAJOR 9 #define MZ_VER_MINOR 1 #define MZ_VER_REVISION 15 #define MZ_VER_SUBREVISION 0 // Flush values. For typical usage you only need MZ_NO_FLUSH and MZ_FINISH. The // other values are for advanced use (refer to the zlib docs). enum { MZ_NO_FLUSH = 0, MZ_PARTIAL_FLUSH = 1, MZ_SYNC_FLUSH = 2, MZ_FULL_FLUSH = 3, MZ_FINISH = 4, MZ_BLOCK = 5 }; // Return status codes. MZ_PARAM_ERROR is non-standard. enum { MZ_OK = 0, MZ_STREAM_END = 1, MZ_NEED_DICT = 2, MZ_ERRNO = -1, MZ_STREAM_ERROR = -2, MZ_DATA_ERROR = -3, MZ_MEM_ERROR = -4, MZ_BUF_ERROR = -5, MZ_VERSION_ERROR = -6, MZ_PARAM_ERROR = -10000 }; // Compression levels: 0-9 are the standard zlib-style levels, 10 is best // possible compression (not zlib compatible, and may be very slow), // MZ_DEFAULT_COMPRESSION=MZ_DEFAULT_LEVEL. enum { MZ_NO_COMPRESSION = 0, MZ_BEST_SPEED = 1, MZ_BEST_COMPRESSION = 9, MZ_UBER_COMPRESSION = 10, MZ_DEFAULT_LEVEL = 6, MZ_DEFAULT_COMPRESSION = -1 }; // Window bits #define MZ_DEFAULT_WINDOW_BITS 15 struct mz_internal_state; // Compression/decompression stream struct. 
typedef struct mz_stream_s { const unsigned char *next_in; // pointer to next byte to read unsigned int avail_in; // number of bytes available at next_in mz_ulong total_in; // total number of bytes consumed so far unsigned char *next_out; // pointer to next byte to write unsigned int avail_out; // number of bytes that can be written to next_out mz_ulong total_out; // total number of bytes produced so far char *msg; // error msg (unused) struct mz_internal_state *state; // internal state, allocated by zalloc/zfree mz_alloc_func zalloc; // optional heap allocation function (defaults to malloc) mz_free_func zfree; // optional heap free function (defaults to free) void *opaque; // heap alloc function user pointer int data_type; // data_type (unused) mz_ulong adler; // adler32 of the source or uncompressed data mz_ulong reserved; // not used } mz_stream; typedef mz_stream *mz_streamp; // Returns the version string of miniz.c. const char *mz_version(void); // mz_deflateInit() initializes a compressor with default options: // Parameters: // pStream must point to an initialized mz_stream struct. // level must be between [MZ_NO_COMPRESSION, MZ_BEST_COMPRESSION]. // level 1 enables a specially optimized compression function that's been // optimized purely for performance, not ratio. // (This special func. is currently only enabled when // MINIZ_USE_UNALIGNED_LOADS_AND_STORES and MINIZ_LITTLE_ENDIAN are defined.) // Return values: // MZ_OK on success. // MZ_STREAM_ERROR if the stream is bogus. // MZ_PARAM_ERROR if the input parameters are bogus. // MZ_MEM_ERROR on out of memory. int mz_deflateInit(mz_streamp pStream, int level); // mz_deflateInit2() is like mz_deflate(), except with more control: // Additional parameters: // method must be MZ_DEFLATED // window_bits must be MZ_DEFAULT_WINDOW_BITS (to wrap the deflate stream with // zlib header/adler-32 footer) or -MZ_DEFAULT_WINDOW_BITS (raw deflate/no // header or footer) // mem_level must be between [1, 9] (it's checked but ignored by miniz.c) int mz_deflateInit2(mz_streamp pStream, int level, int method, int window_bits, int mem_level, int strategy); // Quickly resets a compressor without having to reallocate anything. Same as // calling mz_deflateEnd() followed by mz_deflateInit()/mz_deflateInit2(). int mz_deflateReset(mz_streamp pStream); // mz_deflate() compresses the input to output, consuming as much of the input // and producing as much output as possible. // Parameters: // pStream is the stream to read from and write to. You must initialize/update // the next_in, avail_in, next_out, and avail_out members. // flush may be MZ_NO_FLUSH, MZ_PARTIAL_FLUSH/MZ_SYNC_FLUSH, MZ_FULL_FLUSH, or // MZ_FINISH. // Return values: // MZ_OK on success (when flushing, or if more input is needed but not // available, and/or there's more output to be written but the output buffer // is full). // MZ_STREAM_END if all input has been consumed and all output bytes have been // written. Don't call mz_deflate() on the stream anymore. // MZ_STREAM_ERROR if the stream is bogus. // MZ_PARAM_ERROR if one of the parameters is invalid. // MZ_BUF_ERROR if no forward progress is possible because the input and/or // output buffers are empty. (Fill up the input buffer or free up some output // space and try again.) int mz_deflate(mz_streamp pStream, int flush); // mz_deflateEnd() deinitializes a compressor: // Return values: // MZ_OK on success. // MZ_STREAM_ERROR if the stream is bogus. 
int mz_deflateEnd(mz_streamp pStream);

// mz_deflateBound() returns a (very) conservative upper bound on the amount of
// data that could be generated by deflate(), assuming flush is set to only
// MZ_NO_FLUSH or MZ_FINISH.
mz_ulong mz_deflateBound(mz_streamp pStream, mz_ulong source_len);

// Single-call compression functions mz_compress() and mz_compress2():
// Returns MZ_OK on success, or one of the error codes from mz_deflate() on
// failure.
int mz_compress(unsigned char *pDest, mz_ulong *pDest_len,
                const unsigned char *pSource, mz_ulong source_len);
int mz_compress2(unsigned char *pDest, mz_ulong *pDest_len,
                 const unsigned char *pSource, mz_ulong source_len, int level);

// mz_compressBound() returns a (very) conservative upper bound on the amount
// of data that could be generated by calling mz_compress().
mz_ulong mz_compressBound(mz_ulong source_len);

// Initializes a decompressor.
int mz_inflateInit(mz_streamp pStream);

// mz_inflateInit2() is like mz_inflateInit() with an additional option that
// controls the window size and whether or not the stream has been wrapped with
// a zlib header/footer:
// window_bits must be MZ_DEFAULT_WINDOW_BITS (to parse zlib header/footer) or
// -MZ_DEFAULT_WINDOW_BITS (raw deflate).
int mz_inflateInit2(mz_streamp pStream, int window_bits);

// Decompresses the input stream to the output, consuming only as much of the
// input as needed, and writing as much to the output as possible.
// Parameters:
// pStream is the stream to read from and write to. You must initialize/update
// the next_in, avail_in, next_out, and avail_out members.
// flush may be MZ_NO_FLUSH, MZ_SYNC_FLUSH, or MZ_FINISH.
// On the first call, if flush is MZ_FINISH it's assumed the input and output
// buffers are both sized large enough to decompress the entire stream in a
// single call (this is slightly faster).
// MZ_FINISH implies that there are no more source bytes available beside
// what's already in the input buffer, and that the output buffer is large
// enough to hold the rest of the decompressed data.
// Return values:
// MZ_OK on success. Either more input is needed but not available, and/or
// there's more output to be written but the output buffer is full.
// MZ_STREAM_END if all needed input has been consumed and all output bytes
// have been written. For zlib streams, the adler-32 of the decompressed data
// has also been verified.
// MZ_STREAM_ERROR if the stream is bogus.
// MZ_DATA_ERROR if the deflate stream is invalid.
// MZ_PARAM_ERROR if one of the parameters is invalid.
// MZ_BUF_ERROR if no forward progress is possible because the input buffer is
// empty but the inflater needs more input to continue, or if the output
// buffer is not large enough. Call mz_inflate() again
// with more input data, or with more room in the output buffer (except when
// using single call decompression, described above).
int mz_inflate(mz_streamp pStream, int flush);

// Deinitializes a decompressor.
int mz_inflateEnd(mz_streamp pStream);

// Single-call decompression.
// Returns MZ_OK on success, or one of the error codes from mz_inflate() on
// failure.
int mz_uncompress(unsigned char *pDest, mz_ulong *pDest_len,
                  const unsigned char *pSource, mz_ulong source_len);

// Returns a string description of the specified error code, or NULL if the
// error code is invalid.
const char *mz_error(int err);
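// Usage sketch for the single-call API above: compress a small buffer with
// mz_compress() and recover it with mz_uncompress(). Buffer sizes are
// illustrative; mz_compressBound() gives a safe worst-case output size.
// Kept inside `#if 0` so it does not affect compilation.
#if 0
static int mz_roundtrip_sketch(void) {
  const unsigned char src[] = "hello hello hello hello";
  mz_ulong src_len = (mz_ulong)sizeof(src);

  unsigned char comp[256];           // >= mz_compressBound(src_len) here
  mz_ulong comp_len = sizeof(comp);  // in: capacity, out: compressed size
  if (mz_compress(comp, &comp_len, src, src_len) != MZ_OK) return -1;

  unsigned char decomp[256];
  mz_ulong decomp_len = sizeof(decomp);  // in: capacity, out: original size
  if (mz_uncompress(decomp, &decomp_len, comp, comp_len) != MZ_OK) return -1;

  return (decomp_len == src_len) ? 0 : -1;
}
#endif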
// Redefine zlib-compatible names to miniz equivalents, so miniz.c can be used
// as a drop-in replacement for the subset of zlib that miniz.c supports.
// Define MINIZ_NO_ZLIB_COMPATIBLE_NAMES to disable zlib-compatibility if you
// use zlib in the same project.
#ifndef MINIZ_NO_ZLIB_COMPATIBLE_NAMES
typedef unsigned char Byte;
typedef unsigned int uInt;
typedef mz_ulong uLong;
typedef Byte Bytef;
typedef uInt uIntf;
typedef char charf;
typedef int intf;
typedef void *voidpf;
typedef uLong uLongf;
typedef void *voidp;
typedef void *const voidpc;
#define Z_NULL 0
#define Z_NO_FLUSH MZ_NO_FLUSH
#define Z_PARTIAL_FLUSH MZ_PARTIAL_FLUSH
#define Z_SYNC_FLUSH MZ_SYNC_FLUSH
#define Z_FULL_FLUSH MZ_FULL_FLUSH
#define Z_FINISH MZ_FINISH
#define Z_BLOCK MZ_BLOCK
#define Z_OK MZ_OK
#define Z_STREAM_END MZ_STREAM_END
#define Z_NEED_DICT MZ_NEED_DICT
#define Z_ERRNO MZ_ERRNO
#define Z_STREAM_ERROR MZ_STREAM_ERROR
#define Z_DATA_ERROR MZ_DATA_ERROR
#define Z_MEM_ERROR MZ_MEM_ERROR
#define Z_BUF_ERROR MZ_BUF_ERROR
#define Z_VERSION_ERROR MZ_VERSION_ERROR
#define Z_PARAM_ERROR MZ_PARAM_ERROR
#define Z_NO_COMPRESSION MZ_NO_COMPRESSION
#define Z_BEST_SPEED MZ_BEST_SPEED
#define Z_BEST_COMPRESSION MZ_BEST_COMPRESSION
#define Z_DEFAULT_COMPRESSION MZ_DEFAULT_COMPRESSION
#define Z_DEFAULT_STRATEGY MZ_DEFAULT_STRATEGY
#define Z_FILTERED MZ_FILTERED
#define Z_HUFFMAN_ONLY MZ_HUFFMAN_ONLY
#define Z_RLE MZ_RLE
#define Z_FIXED MZ_FIXED
#define Z_DEFLATED MZ_DEFLATED
#define Z_DEFAULT_WINDOW_BITS MZ_DEFAULT_WINDOW_BITS
#define alloc_func mz_alloc_func
#define free_func mz_free_func
#define internal_state mz_internal_state
#define z_stream mz_stream
#define deflateInit mz_deflateInit
#define deflateInit2 mz_deflateInit2
#define deflateReset mz_deflateReset
#define deflate mz_deflate
#define deflateEnd mz_deflateEnd
#define deflateBound mz_deflateBound
#define compress mz_compress
#define compress2 mz_compress2
#define compressBound mz_compressBound
#define inflateInit mz_inflateInit
#define inflateInit2 mz_inflateInit2
#define inflate mz_inflate
#define inflateEnd mz_inflateEnd
#define uncompress mz_uncompress
#define crc32 mz_crc32
#define adler32 mz_adler32
#define MAX_WBITS 15
#define MAX_MEM_LEVEL 9
#define zError mz_error
#define ZLIB_VERSION MZ_VERSION
#define ZLIB_VERNUM MZ_VERNUM
#define ZLIB_VER_MAJOR MZ_VER_MAJOR
#define ZLIB_VER_MINOR MZ_VER_MINOR
#define ZLIB_VER_REVISION MZ_VER_REVISION
#define ZLIB_VER_SUBREVISION MZ_VER_SUBREVISION
#define zlibVersion mz_version
#define zlib_version mz_version()
#endif  // #ifndef MINIZ_NO_ZLIB_COMPATIBLE_NAMES

#endif  // MINIZ_NO_ZLIB_APIS

// ------------------- Types and macros

typedef unsigned char mz_uint8;
typedef signed short mz_int16;
typedef unsigned short mz_uint16;
typedef unsigned int mz_uint32;
typedef unsigned int mz_uint;
typedef long long mz_int64;
typedef unsigned long long mz_uint64;
typedef int mz_bool;

#define MZ_FALSE (0)
#define MZ_TRUE (1)

// An attempt to work around MSVC's spammy "warning C4127: conditional
// expression is constant" message.
#ifdef _MSC_VER #define MZ_MACRO_END while (0, 0) #else #define MZ_MACRO_END while (0) #endif // ------------------- ZIP archive reading/writing #ifndef MINIZ_NO_ARCHIVE_APIS enum { MZ_ZIP_MAX_IO_BUF_SIZE = 64 * 1024, MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE = 260, MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE = 256 }; typedef struct { mz_uint32 m_file_index; mz_uint32 m_central_dir_ofs; mz_uint16 m_version_made_by; mz_uint16 m_version_needed; mz_uint16 m_bit_flag; mz_uint16 m_method; #ifndef MINIZ_NO_TIME time_t m_time; #endif mz_uint32 m_crc32; mz_uint64 m_comp_size; mz_uint64 m_uncomp_size; mz_uint16 m_internal_attr; mz_uint32 m_external_attr; mz_uint64 m_local_header_ofs; mz_uint32 m_comment_size; char m_filename[MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE]; char m_comment[MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE]; } mz_zip_archive_file_stat; typedef size_t (*mz_file_read_func)(void *pOpaque, mz_uint64 file_ofs, void *pBuf, size_t n); typedef size_t (*mz_file_write_func)(void *pOpaque, mz_uint64 file_ofs, const void *pBuf, size_t n); struct mz_zip_internal_state_tag; typedef struct mz_zip_internal_state_tag mz_zip_internal_state; typedef enum { MZ_ZIP_MODE_INVALID = 0, MZ_ZIP_MODE_READING = 1, MZ_ZIP_MODE_WRITING = 2, MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED = 3 } mz_zip_mode; typedef struct mz_zip_archive_tag { mz_uint64 m_archive_size; mz_uint64 m_central_directory_file_ofs; mz_uint m_total_files; mz_zip_mode m_zip_mode; mz_uint m_file_offset_alignment; mz_alloc_func m_pAlloc; mz_free_func m_pFree; mz_realloc_func m_pRealloc; void *m_pAlloc_opaque; mz_file_read_func m_pRead; mz_file_write_func m_pWrite; void *m_pIO_opaque; mz_zip_internal_state *m_pState; } mz_zip_archive; typedef enum { MZ_ZIP_FLAG_CASE_SENSITIVE = 0x0100, MZ_ZIP_FLAG_IGNORE_PATH = 0x0200, MZ_ZIP_FLAG_COMPRESSED_DATA = 0x0400, MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY = 0x0800 } mz_zip_flags; // ZIP archive reading // Inits a ZIP archive reader. // These functions read and validate the archive's central directory. mz_bool mz_zip_reader_init(mz_zip_archive *pZip, mz_uint64 size, mz_uint32 flags); mz_bool mz_zip_reader_init_mem(mz_zip_archive *pZip, const void *pMem, size_t size, mz_uint32 flags); #ifndef MINIZ_NO_STDIO mz_bool mz_zip_reader_init_file(mz_zip_archive *pZip, const char *pFilename, mz_uint32 flags); #endif // Returns the total number of files in the archive. mz_uint mz_zip_reader_get_num_files(mz_zip_archive *pZip); // Returns detailed information about an archive file entry. mz_bool mz_zip_reader_file_stat(mz_zip_archive *pZip, mz_uint file_index, mz_zip_archive_file_stat *pStat); // Determines if an archive file entry is a directory entry. mz_bool mz_zip_reader_is_file_a_directory(mz_zip_archive *pZip, mz_uint file_index); mz_bool mz_zip_reader_is_file_encrypted(mz_zip_archive *pZip, mz_uint file_index); // Retrieves the filename of an archive file entry. // Returns the number of bytes written to pFilename, or if filename_buf_size is // 0 this function returns the number of bytes needed to fully store the // filename. mz_uint mz_zip_reader_get_filename(mz_zip_archive *pZip, mz_uint file_index, char *pFilename, mz_uint filename_buf_size); // Attempts to locates a file in the archive's central directory. // Valid flags: MZ_ZIP_FLAG_CASE_SENSITIVE, MZ_ZIP_FLAG_IGNORE_PATH // Returns -1 if the file cannot be found. int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName, const char *pComment, mz_uint flags); // Extracts a archive file to a memory buffer using no memory allocation. 
mz_bool mz_zip_reader_extract_to_mem_no_alloc(mz_zip_archive *pZip,
                                              mz_uint file_index, void *pBuf,
                                              size_t buf_size, mz_uint flags,
                                              void *pUser_read_buf,
                                              size_t user_read_buf_size);
mz_bool mz_zip_reader_extract_file_to_mem_no_alloc(
    mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size,
    mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size);

// Extracts an archive file to a memory buffer.
mz_bool mz_zip_reader_extract_to_mem(mz_zip_archive *pZip, mz_uint file_index,
                                     void *pBuf, size_t buf_size,
                                     mz_uint flags);
mz_bool mz_zip_reader_extract_file_to_mem(mz_zip_archive *pZip,
                                          const char *pFilename, void *pBuf,
                                          size_t buf_size, mz_uint flags);

// Extracts an archive file to a dynamically allocated heap buffer.
void *mz_zip_reader_extract_to_heap(mz_zip_archive *pZip, mz_uint file_index,
                                    size_t *pSize, mz_uint flags);
void *mz_zip_reader_extract_file_to_heap(mz_zip_archive *pZip,
                                         const char *pFilename, size_t *pSize,
                                         mz_uint flags);

// Extracts an archive file using a callback function to output the file's
// data.
mz_bool mz_zip_reader_extract_to_callback(mz_zip_archive *pZip,
                                          mz_uint file_index,
                                          mz_file_write_func pCallback,
                                          void *pOpaque, mz_uint flags);
mz_bool mz_zip_reader_extract_file_to_callback(mz_zip_archive *pZip,
                                               const char *pFilename,
                                               mz_file_write_func pCallback,
                                               void *pOpaque, mz_uint flags);

#ifndef MINIZ_NO_STDIO
// Extracts an archive file to a disk file and sets its last accessed and
// modified times.
// This function only extracts files, not archive directory records.
mz_bool mz_zip_reader_extract_to_file(mz_zip_archive *pZip, mz_uint file_index,
                                      const char *pDst_filename,
                                      mz_uint flags);
mz_bool mz_zip_reader_extract_file_to_file(mz_zip_archive *pZip,
                                           const char *pArchive_filename,
                                           const char *pDst_filename,
                                           mz_uint flags);
#endif

// Ends archive reading, freeing all allocations, and closing the input archive
// file if mz_zip_reader_init_file() was used.
mz_bool mz_zip_reader_end(mz_zip_archive *pZip);

// ZIP archive writing

#ifndef MINIZ_NO_ARCHIVE_WRITING_APIS

// Inits a ZIP archive writer.
mz_bool mz_zip_writer_init(mz_zip_archive *pZip, mz_uint64 existing_size);
mz_bool mz_zip_writer_init_heap(mz_zip_archive *pZip,
                                size_t size_to_reserve_at_beginning,
                                size_t initial_allocation_size);

#ifndef MINIZ_NO_STDIO
mz_bool mz_zip_writer_init_file(mz_zip_archive *pZip, const char *pFilename,
                                mz_uint64 size_to_reserve_at_beginning);
#endif

// Converts a ZIP archive reader object into a writer object, to allow
// efficient in-place file appends to occur on an existing archive.
// For archives opened using mz_zip_reader_init_file, pFilename must be the
// archive's filename so it can be reopened for writing. If the file can't be
// reopened, mz_zip_reader_end() will be called.
// For archives opened using mz_zip_reader_init_mem, the memory block must be
// growable using the realloc callback (which defaults to realloc unless you've
// overridden it).
// Finally, for archives opened using mz_zip_reader_init, the mz_zip_archive's
// user provided m_pWrite function cannot be NULL.
// Note: In-place archive modification is not recommended unless you know what
// you're doing, because if execution stops or something goes wrong before
// the archive is finalized the file's central directory will be hosed.
mz_bool mz_zip_writer_init_from_reader(mz_zip_archive *pZip,
                                       const char *pFilename);

// Adds the contents of a memory buffer to an archive. These functions record
// the current local time into the archive.
// To add a directory entry, call this method with an archive name ending in a
// forward slash and an empty buffer.
// level_and_flags - compression level (0-10, see MZ_BEST_SPEED,
// MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or
// just set to MZ_DEFAULT_COMPRESSION.
mz_bool mz_zip_writer_add_mem(mz_zip_archive *pZip, const char *pArchive_name,
                              const void *pBuf, size_t buf_size,
                              mz_uint level_and_flags);
mz_bool mz_zip_writer_add_mem_ex(mz_zip_archive *pZip,
                                 const char *pArchive_name, const void *pBuf,
                                 size_t buf_size, const void *pComment,
                                 mz_uint16 comment_size,
                                 mz_uint level_and_flags, mz_uint64 uncomp_size,
                                 mz_uint32 uncomp_crc32);

#ifndef MINIZ_NO_STDIO
// Adds the contents of a disk file to an archive. This function also records
// the disk file's modified time into the archive.
// level_and_flags - compression level (0-10, see MZ_BEST_SPEED,
// MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or
// just set to MZ_DEFAULT_COMPRESSION.
mz_bool mz_zip_writer_add_file(mz_zip_archive *pZip, const char *pArchive_name,
                               const char *pSrc_filename, const void *pComment,
                               mz_uint16 comment_size,
                               mz_uint level_and_flags);
#endif

// Adds a file to an archive by fully cloning the data from another archive.
// This function fully clones the source file's compressed data (no
// recompression), along with its full filename, extra data, and comment
// fields.
mz_bool mz_zip_writer_add_from_zip_reader(mz_zip_archive *pZip,
                                          mz_zip_archive *pSource_zip,
                                          mz_uint file_index);

// Finalizes the archive by writing the central directory records followed by
// the end of central directory record.
// After an archive is finalized, the only valid call on the mz_zip_archive
// struct is mz_zip_writer_end().
// An archive must be manually finalized by calling this function for it to be
// valid.
mz_bool mz_zip_writer_finalize_archive(mz_zip_archive *pZip);
mz_bool mz_zip_writer_finalize_heap_archive(mz_zip_archive *pZip, void **pBuf,
                                            size_t *pSize);

// Ends archive writing, freeing all allocations, and closing the output file
// if mz_zip_writer_init_file() was used.
// Note: for the archive to be valid, it must have been finalized before
// ending.
mz_bool mz_zip_writer_end(mz_zip_archive *pZip);

// Misc. high-level helper functions:

// mz_zip_add_mem_to_archive_file_in_place() efficiently (but not atomically)
// appends a memory blob to a ZIP archive.
// level_and_flags - compression level (0-10, see MZ_BEST_SPEED,
// MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or
// just set to MZ_DEFAULT_COMPRESSION.
mz_bool mz_zip_add_mem_to_archive_file_in_place(
    const char *pZip_filename, const char *pArchive_name, const void *pBuf,
    size_t buf_size, const void *pComment, mz_uint16 comment_size,
    mz_uint level_and_flags);

// Reads a single file from an archive into a heap block.
// Returns NULL on failure.
void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename,
                                          const char *pArchive_name,
                                          size_t *pSize, mz_uint zip_flags);

#endif // #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS

#endif // #ifndef MINIZ_NO_ARCHIVE_APIS

// ------------------- Low-level Decompression API Definitions

// Decompression flags used by tinfl_decompress().
// TINFL_FLAG_PARSE_ZLIB_HEADER: If set, the input has a valid zlib header and
// ends with an adler32 checksum (it's a valid zlib stream). Otherwise, the
// input is a raw deflate stream.
// TINFL_FLAG_HAS_MORE_INPUT: If set, there are more input bytes available
// beyond the end of the supplied input buffer.
// If clear, the input buffer contains all remaining input.
// TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF: If set, the output buffer is large
// enough to hold the entire decompressed stream. If clear, the output buffer
// is at least the size of the dictionary (typically 32KB).
// TINFL_FLAG_COMPUTE_ADLER32: Force adler-32 checksum computation of the
// decompressed bytes.
enum {
  TINFL_FLAG_PARSE_ZLIB_HEADER = 1,
  TINFL_FLAG_HAS_MORE_INPUT = 2,
  TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF = 4,
  TINFL_FLAG_COMPUTE_ADLER32 = 8
};

// High level decompression functions:
// tinfl_decompress_mem_to_heap() decompresses a block in memory to a heap
// block allocated via malloc().
// On entry:
// pSrc_buf, src_buf_len: Pointer and size of the Deflate or zlib source data
// to decompress.
// On return:
// Function returns a pointer to the decompressed data, or NULL on failure.
// *pOut_len will be set to the decompressed data's size, which could be larger
// than src_buf_len on incompressible data.
// The caller must call mz_free() on the returned block when it's no longer
// needed.
void *tinfl_decompress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
                                   size_t *pOut_len, int flags);

// tinfl_decompress_mem_to_mem() decompresses a block in memory to another
// block in memory.
// Returns TINFL_DECOMPRESS_MEM_TO_MEM_FAILED on failure, or the number of
// bytes written on success.
#define TINFL_DECOMPRESS_MEM_TO_MEM_FAILED ((size_t)(-1))
size_t tinfl_decompress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
                                   const void *pSrc_buf, size_t src_buf_len,
                                   int flags);

// tinfl_decompress_mem_to_callback() decompresses a block in memory to an
// internal 32KB buffer, and a user provided callback function will be called
// to flush the buffer.
// Returns 1 on success or 0 on failure.
typedef int (*tinfl_put_buf_func_ptr)(const void *pBuf, int len, void *pUser);
int tinfl_decompress_mem_to_callback(const void *pIn_buf, size_t *pIn_buf_size,
                                     tinfl_put_buf_func_ptr pPut_buf_func,
                                     void *pPut_buf_user, int flags);

struct tinfl_decompressor_tag;
typedef struct tinfl_decompressor_tag tinfl_decompressor;

// Max size of LZ dictionary.
#define TINFL_LZ_DICT_SIZE 32768

// Return status.
typedef enum {
  TINFL_STATUS_BAD_PARAM = -3,
  TINFL_STATUS_ADLER32_MISMATCH = -2,
  TINFL_STATUS_FAILED = -1,
  TINFL_STATUS_DONE = 0,
  TINFL_STATUS_NEEDS_MORE_INPUT = 1,
  TINFL_STATUS_HAS_MORE_OUTPUT = 2
} tinfl_status;

// Initializes the decompressor to its initial state.
#define tinfl_init(r)                                                          \
  do {                                                                         \
    (r)->m_state = 0;                                                          \
  }                                                                            \
  MZ_MACRO_END
#define tinfl_get_adler32(r) (r)->m_check_adler32

// Main low-level decompressor coroutine function. This is the only function
// actually needed for decompression. All the other functions are just
// high-level helpers for improved usability.
// This is a universal API, i.e. it can be used as a building block to build
// any desired higher level decompression API. In the limit case, it can be
// called once per every byte input or output.
tinfl_status tinfl_decompress(tinfl_decompressor *r,
                              const mz_uint8 *pIn_buf_next,
                              size_t *pIn_buf_size, mz_uint8 *pOut_buf_start,
                              mz_uint8 *pOut_buf_next, size_t *pOut_buf_size,
                              const mz_uint32 decomp_flags);

// Internal/private bits follow.
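// Example: a minimal sketch of a driver loop for the tinfl_decompress()
// coroutine declared above, flushing each chunk of output through a
// caller-supplied tinfl_put_buf_func_ptr. This is essentially what
// tinfl_decompress_mem_to_callback() does internally; my_inflate() and its
// error handling are illustrative only, not part of the API. Pass
// TINFL_FLAG_PARSE_ZLIB_HEADER instead of 0 for a zlib (vs. raw deflate)
// stream.
//
// static int my_inflate(const mz_uint8 *pSrc, size_t src_len,
//                       tinfl_put_buf_func_ptr pPut_buf_func, void *pUser) {
//   tinfl_decompressor decomp;
//   // static keeps the 32KB dictionary off the stack; a real implementation
//   // would heap-allocate it, as tinfl_decompress_mem_to_callback() does.
//   static mz_uint8 dict[TINFL_LZ_DICT_SIZE];
//   size_t in_ofs = 0, dict_ofs = 0;
//   tinfl_init(&decomp);
//   for (;;) {
//     size_t in_size = src_len - in_ofs,
//            out_size = TINFL_LZ_DICT_SIZE - dict_ofs;
//     // All input is present and the output dictionary wraps, so neither
//     // TINFL_FLAG_HAS_MORE_INPUT nor
//     // TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF is passed.
//     tinfl_status status =
//         tinfl_decompress(&decomp, pSrc + in_ofs, &in_size, dict,
//                          dict + dict_ofs, &out_size, 0);
//     in_ofs += in_size;
//     if ((out_size) &&
//         (!(*pPut_buf_func)(dict + dict_ofs, (int)out_size, pUser)))
//       return 0;
//     if (status != TINFL_STATUS_HAS_MORE_OUTPUT)
//       return (status == TINFL_STATUS_DONE);
//     dict_ofs = (dict_ofs + out_size) & (TINFL_LZ_DICT_SIZE - 1);
//   }
// }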
enum {
  TINFL_MAX_HUFF_TABLES = 3,
  TINFL_MAX_HUFF_SYMBOLS_0 = 288,
  TINFL_MAX_HUFF_SYMBOLS_1 = 32,
  TINFL_MAX_HUFF_SYMBOLS_2 = 19,
  TINFL_FAST_LOOKUP_BITS = 10,
  TINFL_FAST_LOOKUP_SIZE = 1 << TINFL_FAST_LOOKUP_BITS
};

typedef struct {
  mz_uint8 m_code_size[TINFL_MAX_HUFF_SYMBOLS_0];
  mz_int16 m_look_up[TINFL_FAST_LOOKUP_SIZE],
      m_tree[TINFL_MAX_HUFF_SYMBOLS_0 * 2];
} tinfl_huff_table;

#if MINIZ_HAS_64BIT_REGISTERS
#define TINFL_USE_64BIT_BITBUF 1
#endif

#if TINFL_USE_64BIT_BITBUF
typedef mz_uint64 tinfl_bit_buf_t;
#define TINFL_BITBUF_SIZE (64)
#else
typedef mz_uint32 tinfl_bit_buf_t;
#define TINFL_BITBUF_SIZE (32)
#endif

struct tinfl_decompressor_tag {
  mz_uint32 m_state, m_num_bits, m_zhdr0, m_zhdr1, m_z_adler32, m_final,
      m_type, m_check_adler32, m_dist, m_counter, m_num_extra,
      m_table_sizes[TINFL_MAX_HUFF_TABLES];
  tinfl_bit_buf_t m_bit_buf;
  size_t m_dist_from_out_buf_start;
  tinfl_huff_table m_tables[TINFL_MAX_HUFF_TABLES];
  mz_uint8 m_raw_header[4],
      m_len_codes[TINFL_MAX_HUFF_SYMBOLS_0 + TINFL_MAX_HUFF_SYMBOLS_1 + 137];
};

// ------------------- Low-level Compression API Definitions

// Set TDEFL_LESS_MEMORY to 1 to use less memory (compression will be slightly
// slower, and raw/dynamic blocks will be output more frequently).
#define TDEFL_LESS_MEMORY 0

// tdefl_init() compression flags logically OR'd together (low 12 bits contain
// the max. number of probes per dictionary search):
// TDEFL_DEFAULT_MAX_PROBES: The compressor defaults to 128 dictionary probes
// per dictionary search. 0=Huffman only, 1=Huffman+LZ (fastest/crap
// compression), 4095=Huffman+LZ (slowest/best compression).
enum {
  TDEFL_HUFFMAN_ONLY = 0,
  TDEFL_DEFAULT_MAX_PROBES = 128,
  TDEFL_MAX_PROBES_MASK = 0xFFF
};

// TDEFL_WRITE_ZLIB_HEADER: If set, the compressor outputs a zlib header before
// the deflate data, and the Adler-32 of the source data at the end. Otherwise,
// you'll get raw deflate data.
// TDEFL_COMPUTE_ADLER32: Always compute the adler-32 of the input data (even
// when not writing zlib headers).
// TDEFL_GREEDY_PARSING_FLAG: Set to use faster greedy parsing, instead of more
// efficient lazy parsing.
// TDEFL_NONDETERMINISTIC_PARSING_FLAG: Enable to decrease the compressor's
// initialization time to the minimum, but the output may vary from run to run
// given the same input (depending on the contents of memory).
// TDEFL_RLE_MATCHES: Only look for RLE matches (matches with a distance of 1)
// TDEFL_FILTER_MATCHES: Discards matches <= 5 chars if enabled.
// TDEFL_FORCE_ALL_STATIC_BLOCKS: Disable usage of optimized Huffman tables.
// TDEFL_FORCE_ALL_RAW_BLOCKS: Only use raw (uncompressed) deflate blocks.
// The low 12 bits are reserved to control the max # of hash probes per
// dictionary lookup (see TDEFL_MAX_PROBES_MASK).
enum {
  TDEFL_WRITE_ZLIB_HEADER = 0x01000,
  TDEFL_COMPUTE_ADLER32 = 0x02000,
  TDEFL_GREEDY_PARSING_FLAG = 0x04000,
  TDEFL_NONDETERMINISTIC_PARSING_FLAG = 0x08000,
  TDEFL_RLE_MATCHES = 0x10000,
  TDEFL_FILTER_MATCHES = 0x20000,
  TDEFL_FORCE_ALL_STATIC_BLOCKS = 0x40000,
  TDEFL_FORCE_ALL_RAW_BLOCKS = 0x80000
};

// High level compression functions:
// tdefl_compress_mem_to_heap() compresses a block in memory to a heap block
// allocated via malloc().
// On entry:
// pSrc_buf, src_buf_len: Pointer and size of source block to compress.
// flags: The max match finder probes (default is 128) logically OR'd against
// the above flags. Higher probes are slower but improve compression.
// On return:
// Function returns a pointer to the compressed data, or NULL on failure.
// *pOut_len will be set to the compressed data's size, which could be larger
// than src_buf_len on incompressible data.
// The caller must free() the returned block when it's no longer needed.
void *tdefl_compress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
                                 size_t *pOut_len, int flags);

// tdefl_compress_mem_to_mem() compresses a block in memory to another block in
// memory.
// Returns 0 on failure.
size_t tdefl_compress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
                                 const void *pSrc_buf, size_t src_buf_len,
                                 int flags);

// Compresses an image to a compressed PNG file in memory.
// On entry:
// pImage, w, h, and num_chans describe the image to compress. num_chans may be
// 1, 2, 3, or 4.
// The image pitch in bytes per scanline will be w*num_chans. The leftmost
// pixel on the top scanline is stored first in memory.
// level may range from [0,10] (use MZ_NO_COMPRESSION, MZ_BEST_SPEED,
// MZ_BEST_COMPRESSION, etc.); MZ_DEFAULT_LEVEL is a decent default.
// If flip is true, the image will be flipped on the Y axis (useful for OpenGL
// apps).
// On return:
// Function returns a pointer to the compressed data, or NULL on failure.
// *pLen_out will be set to the size of the PNG image file.
// The caller must mz_free() the returned heap block (which will typically be
// larger than *pLen_out) when it's no longer needed.
void *tdefl_write_image_to_png_file_in_memory_ex(const void *pImage, int w,
                                                 int h, int num_chans,
                                                 size_t *pLen_out,
                                                 mz_uint level, mz_bool flip);
void *tdefl_write_image_to_png_file_in_memory(const void *pImage, int w, int h,
                                              int num_chans, size_t *pLen_out);

// Output stream interface. The compressor uses this interface to write
// compressed data. It'll typically be called with TDEFL_OUT_BUF_SIZE bytes at
// a time.
typedef mz_bool (*tdefl_put_buf_func_ptr)(const void *pBuf, int len,
                                          void *pUser);

// tdefl_compress_mem_to_output() compresses a block to an output stream. The
// above helpers use this function internally.
mz_bool tdefl_compress_mem_to_output(const void *pBuf, size_t buf_len,
                                     tdefl_put_buf_func_ptr pPut_buf_func,
                                     void *pPut_buf_user, int flags);

enum {
  TDEFL_MAX_HUFF_TABLES = 3,
  TDEFL_MAX_HUFF_SYMBOLS_0 = 288,
  TDEFL_MAX_HUFF_SYMBOLS_1 = 32,
  TDEFL_MAX_HUFF_SYMBOLS_2 = 19,
  TDEFL_LZ_DICT_SIZE = 32768,
  TDEFL_LZ_DICT_SIZE_MASK = TDEFL_LZ_DICT_SIZE - 1,
  TDEFL_MIN_MATCH_LEN = 3,
  TDEFL_MAX_MATCH_LEN = 258
};

// TDEFL_OUT_BUF_SIZE MUST be large enough to hold a single entire compressed
// output block (using static/fixed Huffman codes).
#if TDEFL_LESS_MEMORY
enum {
  TDEFL_LZ_CODE_BUF_SIZE = 24 * 1024,
  TDEFL_OUT_BUF_SIZE = (TDEFL_LZ_CODE_BUF_SIZE * 13) / 10,
  TDEFL_MAX_HUFF_SYMBOLS = 288,
  TDEFL_LZ_HASH_BITS = 12,
  TDEFL_LEVEL1_HASH_SIZE_MASK = 4095,
  TDEFL_LZ_HASH_SHIFT = (TDEFL_LZ_HASH_BITS + 2) / 3,
  TDEFL_LZ_HASH_SIZE = 1 << TDEFL_LZ_HASH_BITS
};
#else
enum {
  TDEFL_LZ_CODE_BUF_SIZE = 64 * 1024,
  TDEFL_OUT_BUF_SIZE = (TDEFL_LZ_CODE_BUF_SIZE * 13) / 10,
  TDEFL_MAX_HUFF_SYMBOLS = 288,
  TDEFL_LZ_HASH_BITS = 15,
  TDEFL_LEVEL1_HASH_SIZE_MASK = 4095,
  TDEFL_LZ_HASH_SHIFT = (TDEFL_LZ_HASH_BITS + 2) / 3,
  TDEFL_LZ_HASH_SIZE = 1 << TDEFL_LZ_HASH_BITS
};
#endif

// The low-level tdefl functions below may be used directly if the above helper
// functions aren't flexible enough. The low-level functions don't make any
// heap allocations, unlike the above helper functions.
typedef enum {
  TDEFL_STATUS_BAD_PARAM = -2,
  TDEFL_STATUS_PUT_BUF_FAILED = -1,
  TDEFL_STATUS_OKAY = 0,
  TDEFL_STATUS_DONE = 1
} tdefl_status;

// Must map to MZ_NO_FLUSH, MZ_SYNC_FLUSH, etc. enums
typedef enum {
  TDEFL_NO_FLUSH = 0,
  TDEFL_SYNC_FLUSH = 2,
  TDEFL_FULL_FLUSH = 3,
  TDEFL_FINISH = 4
} tdefl_flush;

// tdefl's compression state structure.
typedef struct {
  tdefl_put_buf_func_ptr m_pPut_buf_func;
  void *m_pPut_buf_user;
  mz_uint m_flags, m_max_probes[2];
  int m_greedy_parsing;
  mz_uint m_adler32, m_lookahead_pos, m_lookahead_size, m_dict_size;
  mz_uint8 *m_pLZ_code_buf, *m_pLZ_flags, *m_pOutput_buf, *m_pOutput_buf_end;
  mz_uint m_num_flags_left, m_total_lz_bytes, m_lz_code_buf_dict_pos,
      m_bits_in, m_bit_buffer;
  mz_uint m_saved_match_dist, m_saved_match_len, m_saved_lit,
      m_output_flush_ofs, m_output_flush_remaining, m_finished, m_block_index,
      m_wants_to_finish;
  tdefl_status m_prev_return_status;
  const void *m_pIn_buf;
  void *m_pOut_buf;
  size_t *m_pIn_buf_size, *m_pOut_buf_size;
  tdefl_flush m_flush;
  const mz_uint8 *m_pSrc;
  size_t m_src_buf_left, m_out_buf_ofs;
  mz_uint8 m_dict[TDEFL_LZ_DICT_SIZE + TDEFL_MAX_MATCH_LEN - 1];
  mz_uint16 m_huff_count[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
  mz_uint16 m_huff_codes[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
  mz_uint8 m_huff_code_sizes[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
  mz_uint8 m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE];
  mz_uint16 m_next[TDEFL_LZ_DICT_SIZE];
  mz_uint16 m_hash[TDEFL_LZ_HASH_SIZE];
  mz_uint8 m_output_buf[TDEFL_OUT_BUF_SIZE];
} tdefl_compressor;

// Initializes the compressor.
// There is no corresponding deinit() function because the tdefl API's do not
// dynamically allocate memory.
// pPut_buf_func: If non-NULL, output data will be supplied to the specified
// callback. In this case, the user should call the tdefl_compress_buffer() API
// for compression.
// If pPut_buf_func is NULL the user should always call the tdefl_compress()
// API.
// flags: See the above enums (TDEFL_HUFFMAN_ONLY, TDEFL_WRITE_ZLIB_HEADER,
// etc.)
tdefl_status tdefl_init(tdefl_compressor *d,
                        tdefl_put_buf_func_ptr pPut_buf_func,
                        void *pPut_buf_user, int flags);

// Compresses a block of data, consuming as much of the specified input buffer
// as possible, and writing as much compressed data to the specified output
// buffer as possible.
tdefl_status tdefl_compress(tdefl_compressor *d, const void *pIn_buf,
                            size_t *pIn_buf_size, void *pOut_buf,
                            size_t *pOut_buf_size, tdefl_flush flush);

// tdefl_compress_buffer() is only usable when tdefl_init() is called with a
// non-NULL tdefl_put_buf_func_ptr.
// tdefl_compress_buffer() always consumes the entire input buffer.
tdefl_status tdefl_compress_buffer(tdefl_compressor *d, const void *pIn_buf,
                                   size_t in_buf_size, tdefl_flush flush);

tdefl_status tdefl_get_prev_return_status(tdefl_compressor *d);
mz_uint32 tdefl_get_adler32(tdefl_compressor *d);

// Can't use tdefl_create_comp_flags_from_zip_params if MINIZ_NO_ZLIB_APIS is
// defined, because it uses some of the zlib-style macros.
#ifndef MINIZ_NO_ZLIB_APIS
// Create tdefl_compress() flags given zlib-style compression parameters.
// level may range from [0,10] (where 10 is absolute max compression, but may
// be much slower on some files)
// window_bits may be -15 (raw deflate) or 15 (zlib)
// strategy may be either MZ_DEFAULT_STRATEGY, MZ_FILTERED, MZ_HUFFMAN_ONLY,
// MZ_RLE, or MZ_FIXED
mz_uint tdefl_create_comp_flags_from_zip_params(int level, int window_bits,
                                                int strategy);
#endif // #ifndef MINIZ_NO_ZLIB_APIS

#ifdef __cplusplus
}
#endif

#endif // MINIZ_HEADER_INCLUDED

// ------------------- End of Header: Implementation follows. (If you only want
// the header, define MINIZ_HEADER_FILE_ONLY.)
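// Example: a minimal sketch of a one-shot round trip through the zlib-style
// helpers declared in the header above. Buffer sizes and error handling are
// abbreviated for illustration; example_round_trip() is not part of the API.
//
// #include <string.h>
// int example_round_trip(void) {
//   static const char s_str[] = "Hello, miniz!";
//   unsigned char cmp_buf[256], uncmp_buf[256];
//   mz_ulong src_len = (mz_ulong)sizeof(s_str);
//   mz_ulong cmp_len = mz_compressBound(src_len); // worst-case output size
//   mz_ulong uncmp_len = (mz_ulong)sizeof(uncmp_buf);
//   if (mz_compress(cmp_buf, &cmp_len, (const unsigned char *)s_str,
//                   src_len) != MZ_OK)
//     return 0;
//   if (mz_uncompress(uncmp_buf, &uncmp_len, cmp_buf, cmp_len) != MZ_OK)
//     return 0;
//   // On success uncmp_len == src_len and uncmp_buf holds the original data.
//   return (uncmp_len == src_len) && (!memcmp(uncmp_buf, s_str, src_len));
// }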
#ifndef MINIZ_HEADER_FILE_ONLY typedef unsigned char mz_validate_uint16[sizeof(mz_uint16) == 2 ? 1 : -1]; typedef unsigned char mz_validate_uint32[sizeof(mz_uint32) == 4 ? 1 : -1]; typedef unsigned char mz_validate_uint64[sizeof(mz_uint64) == 8 ? 1 : -1]; //#include <assert.h> //#include <string.h> #define MZ_ASSERT(x) assert(x) #ifdef MINIZ_NO_MALLOC #define MZ_MALLOC(x) NULL #define MZ_FREE(x) (void)x, ((void)0) #define MZ_REALLOC(p, x) NULL #else #define MZ_MALLOC(x) malloc(x) #define MZ_FREE(x) free(x) #define MZ_REALLOC(p, x) realloc(p, x) #endif #define MZ_MAX(a, b) (((a) > (b)) ? (a) : (b)) #define MZ_MIN(a, b) (((a) < (b)) ? (a) : (b)) #define MZ_CLEAR_OBJ(obj) memset(&(obj), 0, sizeof(obj)) #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN #define MZ_READ_LE16(p) *((const mz_uint16 *)(p)) #define MZ_READ_LE32(p) *((const mz_uint32 *)(p)) #else #define MZ_READ_LE16(p) \ ((mz_uint32)(((const mz_uint8 *)(p))[0]) | \ ((mz_uint32)(((const mz_uint8 *)(p))[1]) << 8U)) #define MZ_READ_LE32(p) \ ((mz_uint32)(((const mz_uint8 *)(p))[0]) | \ ((mz_uint32)(((const mz_uint8 *)(p))[1]) << 8U) | \ ((mz_uint32)(((const mz_uint8 *)(p))[2]) << 16U) | \ ((mz_uint32)(((const mz_uint8 *)(p))[3]) << 24U)) #endif #ifdef _MSC_VER #define MZ_FORCEINLINE __forceinline #elif defined(__GNUC__) #define MZ_FORCEINLINE inline __attribute__((__always_inline__)) #else #define MZ_FORCEINLINE inline #endif #ifdef __cplusplus extern "C" { #endif // ------------------- zlib-style API's mz_ulong mz_adler32(mz_ulong adler, const unsigned char *ptr, size_t buf_len) { mz_uint32 i, s1 = (mz_uint32)(adler & 0xffff), s2 = (mz_uint32)(adler >> 16); size_t block_len = buf_len % 5552; if (!ptr) return MZ_ADLER32_INIT; while (buf_len) { for (i = 0; i + 7 < block_len; i += 8, ptr += 8) { s1 += ptr[0], s2 += s1; s1 += ptr[1], s2 += s1; s1 += ptr[2], s2 += s1; s1 += ptr[3], s2 += s1; s1 += ptr[4], s2 += s1; s1 += ptr[5], s2 += s1; s1 += ptr[6], s2 += s1; s1 += ptr[7], s2 += s1; } for (; i < block_len; ++i) s1 += *ptr++, s2 += s1; s1 %= 65521U, s2 %= 65521U; buf_len -= block_len; block_len = 5552; } return (s2 << 16) + s1; } // Karl Malbrain's compact CRC-32. 
See "A compact CCITT crc16 and crc32 C // implementation that balances processor cache usage against speed": // http://www.geocities.com/malbrain/ mz_ulong mz_crc32(mz_ulong crc, const mz_uint8 *ptr, size_t buf_len) { static const mz_uint32 s_crc32[16] = { 0, 0x1db71064, 0x3b6e20c8, 0x26d930ac, 0x76dc4190, 0x6b6b51f4, 0x4db26158, 0x5005713c, 0xedb88320, 0xf00f9344, 0xd6d6a3e8, 0xcb61b38c, 0x9b64c2b0, 0x86d3d2d4, 0xa00ae278, 0xbdbdf21c}; mz_uint32 crcu32 = (mz_uint32)crc; if (!ptr) return MZ_CRC32_INIT; crcu32 = ~crcu32; while (buf_len--) { mz_uint8 b = *ptr++; crcu32 = (crcu32 >> 4) ^ s_crc32[(crcu32 & 0xF) ^ (b & 0xF)]; crcu32 = (crcu32 >> 4) ^ s_crc32[(crcu32 & 0xF) ^ (b >> 4)]; } return ~crcu32; } void mz_free(void *p) { MZ_FREE(p); } #ifndef MINIZ_NO_ZLIB_APIS static void *def_alloc_func(void *opaque, size_t items, size_t size) { (void)opaque, (void)items, (void)size; return MZ_MALLOC(items * size); } static void def_free_func(void *opaque, void *address) { (void)opaque, (void)address; MZ_FREE(address); } // static void *def_realloc_func(void *opaque, void *address, size_t items, // size_t size) { // (void)opaque, (void)address, (void)items, (void)size; // return MZ_REALLOC(address, items * size); //} const char *mz_version(void) { return MZ_VERSION; } int mz_deflateInit(mz_streamp pStream, int level) { return mz_deflateInit2(pStream, level, MZ_DEFLATED, MZ_DEFAULT_WINDOW_BITS, 9, MZ_DEFAULT_STRATEGY); } int mz_deflateInit2(mz_streamp pStream, int level, int method, int window_bits, int mem_level, int strategy) { tdefl_compressor *pComp; mz_uint comp_flags = TDEFL_COMPUTE_ADLER32 | tdefl_create_comp_flags_from_zip_params(level, window_bits, strategy); if (!pStream) return MZ_STREAM_ERROR; if ((method != MZ_DEFLATED) || ((mem_level < 1) || (mem_level > 9)) || ((window_bits != MZ_DEFAULT_WINDOW_BITS) && (-window_bits != MZ_DEFAULT_WINDOW_BITS))) return MZ_PARAM_ERROR; pStream->data_type = 0; pStream->adler = MZ_ADLER32_INIT; pStream->msg = NULL; pStream->reserved = 0; pStream->total_in = 0; pStream->total_out = 0; if (!pStream->zalloc) pStream->zalloc = def_alloc_func; if (!pStream->zfree) pStream->zfree = def_free_func; pComp = (tdefl_compressor *)pStream->zalloc(pStream->opaque, 1, sizeof(tdefl_compressor)); if (!pComp) return MZ_MEM_ERROR; pStream->state = (struct mz_internal_state *)pComp; if (tdefl_init(pComp, NULL, NULL, comp_flags) != TDEFL_STATUS_OKAY) { mz_deflateEnd(pStream); return MZ_PARAM_ERROR; } return MZ_OK; } int mz_deflateReset(mz_streamp pStream) { if ((!pStream) || (!pStream->state) || (!pStream->zalloc) || (!pStream->zfree)) return MZ_STREAM_ERROR; pStream->total_in = pStream->total_out = 0; tdefl_init((tdefl_compressor *)pStream->state, NULL, NULL, ((tdefl_compressor *)pStream->state)->m_flags); return MZ_OK; } int mz_deflate(mz_streamp pStream, int flush) { size_t in_bytes, out_bytes; mz_ulong orig_total_in, orig_total_out; int mz_status = MZ_OK; if ((!pStream) || (!pStream->state) || (flush < 0) || (flush > MZ_FINISH) || (!pStream->next_out)) return MZ_STREAM_ERROR; if (!pStream->avail_out) return MZ_BUF_ERROR; if (flush == MZ_PARTIAL_FLUSH) flush = MZ_SYNC_FLUSH; if (((tdefl_compressor *)pStream->state)->m_prev_return_status == TDEFL_STATUS_DONE) return (flush == MZ_FINISH) ? 
MZ_STREAM_END : MZ_BUF_ERROR; orig_total_in = pStream->total_in; orig_total_out = pStream->total_out; for (;;) { tdefl_status defl_status; in_bytes = pStream->avail_in; out_bytes = pStream->avail_out; defl_status = tdefl_compress((tdefl_compressor *)pStream->state, pStream->next_in, &in_bytes, pStream->next_out, &out_bytes, (tdefl_flush)flush); pStream->next_in += (mz_uint)in_bytes; pStream->avail_in -= (mz_uint)in_bytes; pStream->total_in += (mz_uint)in_bytes; pStream->adler = tdefl_get_adler32((tdefl_compressor *)pStream->state); pStream->next_out += (mz_uint)out_bytes; pStream->avail_out -= (mz_uint)out_bytes; pStream->total_out += (mz_uint)out_bytes; if (defl_status < 0) { mz_status = MZ_STREAM_ERROR; break; } else if (defl_status == TDEFL_STATUS_DONE) { mz_status = MZ_STREAM_END; break; } else if (!pStream->avail_out) break; else if ((!pStream->avail_in) && (flush != MZ_FINISH)) { if ((flush) || (pStream->total_in != orig_total_in) || (pStream->total_out != orig_total_out)) break; return MZ_BUF_ERROR; // Can't make forward progress without some input. } } return mz_status; } int mz_deflateEnd(mz_streamp pStream) { if (!pStream) return MZ_STREAM_ERROR; if (pStream->state) { pStream->zfree(pStream->opaque, pStream->state); pStream->state = NULL; } return MZ_OK; } mz_ulong mz_deflateBound(mz_streamp pStream, mz_ulong source_len) { (void)pStream; // This is really over conservative. (And lame, but it's actually pretty // tricky to compute a true upper bound given the way tdefl's blocking works.) return MZ_MAX(128 + (source_len * 110) / 100, 128 + source_len + ((source_len / (31 * 1024)) + 1) * 5); } int mz_compress2(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len, int level) { int status; mz_stream stream; memset(&stream, 0, sizeof(stream)); // In case mz_ulong is 64-bits (argh I hate longs). if ((source_len | *pDest_len) > 0xFFFFFFFFU) return MZ_PARAM_ERROR; stream.next_in = pSource; stream.avail_in = (mz_uint32)source_len; stream.next_out = pDest; stream.avail_out = (mz_uint32)*pDest_len; status = mz_deflateInit(&stream, level); if (status != MZ_OK) return status; status = mz_deflate(&stream, MZ_FINISH); if (status != MZ_STREAM_END) { mz_deflateEnd(&stream); return (status == MZ_OK) ? 
MZ_BUF_ERROR : status; } *pDest_len = stream.total_out; return mz_deflateEnd(&stream); } int mz_compress(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len) { return mz_compress2(pDest, pDest_len, pSource, source_len, MZ_DEFAULT_COMPRESSION); } mz_ulong mz_compressBound(mz_ulong source_len) { return mz_deflateBound(NULL, source_len); } typedef struct { tinfl_decompressor m_decomp; mz_uint m_dict_ofs, m_dict_avail, m_first_call, m_has_flushed; int m_window_bits; mz_uint8 m_dict[TINFL_LZ_DICT_SIZE]; tinfl_status m_last_status; } inflate_state; int mz_inflateInit2(mz_streamp pStream, int window_bits) { inflate_state *pDecomp; if (!pStream) return MZ_STREAM_ERROR; if ((window_bits != MZ_DEFAULT_WINDOW_BITS) && (-window_bits != MZ_DEFAULT_WINDOW_BITS)) return MZ_PARAM_ERROR; pStream->data_type = 0; pStream->adler = 0; pStream->msg = NULL; pStream->total_in = 0; pStream->total_out = 0; pStream->reserved = 0; if (!pStream->zalloc) pStream->zalloc = def_alloc_func; if (!pStream->zfree) pStream->zfree = def_free_func; pDecomp = (inflate_state *)pStream->zalloc(pStream->opaque, 1, sizeof(inflate_state)); if (!pDecomp) return MZ_MEM_ERROR; pStream->state = (struct mz_internal_state *)pDecomp; tinfl_init(&pDecomp->m_decomp); pDecomp->m_dict_ofs = 0; pDecomp->m_dict_avail = 0; pDecomp->m_last_status = TINFL_STATUS_NEEDS_MORE_INPUT; pDecomp->m_first_call = 1; pDecomp->m_has_flushed = 0; pDecomp->m_window_bits = window_bits; return MZ_OK; } int mz_inflateInit(mz_streamp pStream) { return mz_inflateInit2(pStream, MZ_DEFAULT_WINDOW_BITS); } int mz_inflate(mz_streamp pStream, int flush) { inflate_state *pState; mz_uint n, first_call, decomp_flags = TINFL_FLAG_COMPUTE_ADLER32; size_t in_bytes, out_bytes, orig_avail_in; tinfl_status status; if ((!pStream) || (!pStream->state)) return MZ_STREAM_ERROR; if (flush == MZ_PARTIAL_FLUSH) flush = MZ_SYNC_FLUSH; if ((flush) && (flush != MZ_SYNC_FLUSH) && (flush != MZ_FINISH)) return MZ_STREAM_ERROR; pState = (inflate_state *)pStream->state; if (pState->m_window_bits > 0) decomp_flags |= TINFL_FLAG_PARSE_ZLIB_HEADER; orig_avail_in = pStream->avail_in; first_call = pState->m_first_call; pState->m_first_call = 0; if (pState->m_last_status < 0) return MZ_DATA_ERROR; if (pState->m_has_flushed && (flush != MZ_FINISH)) return MZ_STREAM_ERROR; pState->m_has_flushed |= (flush == MZ_FINISH); if ((flush == MZ_FINISH) && (first_call)) { // MZ_FINISH on the first call implies that the input and output buffers are // large enough to hold the entire compressed/decompressed file. decomp_flags |= TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF; in_bytes = pStream->avail_in; out_bytes = pStream->avail_out; status = tinfl_decompress(&pState->m_decomp, pStream->next_in, &in_bytes, pStream->next_out, pStream->next_out, &out_bytes, decomp_flags); pState->m_last_status = status; pStream->next_in += (mz_uint)in_bytes; pStream->avail_in -= (mz_uint)in_bytes; pStream->total_in += (mz_uint)in_bytes; pStream->adler = tinfl_get_adler32(&pState->m_decomp); pStream->next_out += (mz_uint)out_bytes; pStream->avail_out -= (mz_uint)out_bytes; pStream->total_out += (mz_uint)out_bytes; if (status < 0) return MZ_DATA_ERROR; else if (status != TINFL_STATUS_DONE) { pState->m_last_status = TINFL_STATUS_FAILED; return MZ_BUF_ERROR; } return MZ_STREAM_END; } // flush != MZ_FINISH then we must assume there's more input. 
  if (flush != MZ_FINISH) decomp_flags |= TINFL_FLAG_HAS_MORE_INPUT;

  if (pState->m_dict_avail) {
    n = MZ_MIN(pState->m_dict_avail, pStream->avail_out);
    memcpy(pStream->next_out, pState->m_dict + pState->m_dict_ofs, n);
    pStream->next_out += n;
    pStream->avail_out -= n;
    pStream->total_out += n;
    pState->m_dict_avail -= n;
    pState->m_dict_ofs = (pState->m_dict_ofs + n) & (TINFL_LZ_DICT_SIZE - 1);
    return ((pState->m_last_status == TINFL_STATUS_DONE) &&
            (!pState->m_dict_avail))
               ? MZ_STREAM_END
               : MZ_OK;
  }

  for (;;) {
    in_bytes = pStream->avail_in;
    out_bytes = TINFL_LZ_DICT_SIZE - pState->m_dict_ofs;
    status = tinfl_decompress(
        &pState->m_decomp, pStream->next_in, &in_bytes, pState->m_dict,
        pState->m_dict + pState->m_dict_ofs, &out_bytes, decomp_flags);
    pState->m_last_status = status;
    pStream->next_in += (mz_uint)in_bytes;
    pStream->avail_in -= (mz_uint)in_bytes;
    pStream->total_in += (mz_uint)in_bytes;
    pStream->adler = tinfl_get_adler32(&pState->m_decomp);
    pState->m_dict_avail = (mz_uint)out_bytes;

    n = MZ_MIN(pState->m_dict_avail, pStream->avail_out);
    memcpy(pStream->next_out, pState->m_dict + pState->m_dict_ofs, n);
    pStream->next_out += n;
    pStream->avail_out -= n;
    pStream->total_out += n;
    pState->m_dict_avail -= n;
    pState->m_dict_ofs = (pState->m_dict_ofs + n) & (TINFL_LZ_DICT_SIZE - 1);

    if (status < 0)
      return MZ_DATA_ERROR; // Stream is corrupted (there could be some
                            // uncompressed data left in the output dictionary -
                            // oh well).
    else if ((status == TINFL_STATUS_NEEDS_MORE_INPUT) && (!orig_avail_in))
      return MZ_BUF_ERROR; // Signal caller that we can't make forward progress
                           // without supplying more input or setting flush to
                           // MZ_FINISH.
    else if (flush == MZ_FINISH) {
      // The output buffer MUST be large enough to hold the remaining
      // uncompressed data when flush==MZ_FINISH.
      if (status == TINFL_STATUS_DONE)
        return pState->m_dict_avail ? MZ_BUF_ERROR : MZ_STREAM_END;
      // status here must be TINFL_STATUS_HAS_MORE_OUTPUT, which means there's
      // at least 1 more byte on the way. If there's no more room left in the
      // output buffer then something is wrong.
      else if (!pStream->avail_out)
        return MZ_BUF_ERROR;
    } else if ((status == TINFL_STATUS_DONE) || (!pStream->avail_in) ||
               (!pStream->avail_out) || (pState->m_dict_avail))
      break;
  }

  return ((status == TINFL_STATUS_DONE) && (!pState->m_dict_avail))
             ? MZ_STREAM_END
             : MZ_OK;
}

int mz_inflateEnd(mz_streamp pStream) {
  if (!pStream) return MZ_STREAM_ERROR;
  if (pStream->state) {
    pStream->zfree(pStream->opaque, pStream->state);
    pStream->state = NULL;
  }
  return MZ_OK;
}

int mz_uncompress(unsigned char *pDest, mz_ulong *pDest_len,
                  const unsigned char *pSource, mz_ulong source_len) {
  mz_stream stream;
  int status;
  memset(&stream, 0, sizeof(stream));

  // In case mz_ulong is 64-bits (argh I hate longs).
  if ((source_len | *pDest_len) > 0xFFFFFFFFU) return MZ_PARAM_ERROR;

  stream.next_in = pSource;
  stream.avail_in = (mz_uint32)source_len;
  stream.next_out = pDest;
  stream.avail_out = (mz_uint32)*pDest_len;

  status = mz_inflateInit(&stream);
  if (status != MZ_OK) return status;

  status = mz_inflate(&stream, MZ_FINISH);
  if (status != MZ_STREAM_END) {
    mz_inflateEnd(&stream);
    return ((status == MZ_BUF_ERROR) && (!stream.avail_in)) ?
MZ_DATA_ERROR : status; } *pDest_len = stream.total_out; return mz_inflateEnd(&stream); } const char *mz_error(int err) { static struct { int m_err; const char *m_pDesc; } s_error_descs[] = {{MZ_OK, ""}, {MZ_STREAM_END, "stream end"}, {MZ_NEED_DICT, "need dictionary"}, {MZ_ERRNO, "file error"}, {MZ_STREAM_ERROR, "stream error"}, {MZ_DATA_ERROR, "data error"}, {MZ_MEM_ERROR, "out of memory"}, {MZ_BUF_ERROR, "buf error"}, {MZ_VERSION_ERROR, "version error"}, {MZ_PARAM_ERROR, "parameter error"}}; mz_uint i; for (i = 0; i < sizeof(s_error_descs) / sizeof(s_error_descs[0]); ++i) if (s_error_descs[i].m_err == err) return s_error_descs[i].m_pDesc; return NULL; } #endif // MINIZ_NO_ZLIB_APIS // ------------------- Low-level Decompression (completely independent from all // compression API's) #define TINFL_MEMCPY(d, s, l) memcpy(d, s, l) #define TINFL_MEMSET(p, c, l) memset(p, c, l) #define TINFL_CR_BEGIN \ switch (r->m_state) { \ case 0: #define TINFL_CR_RETURN(state_index, result) \ do { \ status = result; \ r->m_state = state_index; \ goto common_exit; \ case state_index:; \ } \ MZ_MACRO_END #define TINFL_CR_RETURN_FOREVER(state_index, result) \ do { \ for (;;) { \ TINFL_CR_RETURN(state_index, result); \ } \ } \ MZ_MACRO_END #define TINFL_CR_FINISH } // TODO: If the caller has indicated that there's no more input, and we attempt // to read beyond the input buf, then something is wrong with the input because // the inflator never // reads ahead more than it needs to. Currently TINFL_GET_BYTE() pads the end of // the stream with 0's in this scenario. #define TINFL_GET_BYTE(state_index, c) \ do { \ if (pIn_buf_cur >= pIn_buf_end) { \ for (;;) { \ if (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) { \ TINFL_CR_RETURN(state_index, TINFL_STATUS_NEEDS_MORE_INPUT); \ if (pIn_buf_cur < pIn_buf_end) { \ c = *pIn_buf_cur++; \ break; \ } \ } else { \ c = 0; \ break; \ } \ } \ } else \ c = *pIn_buf_cur++; \ } \ MZ_MACRO_END #define TINFL_NEED_BITS(state_index, n) \ do { \ mz_uint c; \ TINFL_GET_BYTE(state_index, c); \ bit_buf |= (((tinfl_bit_buf_t)c) << num_bits); \ num_bits += 8; \ } while (num_bits < (mz_uint)(n)) #define TINFL_SKIP_BITS(state_index, n) \ do { \ if (num_bits < (mz_uint)(n)) { \ TINFL_NEED_BITS(state_index, n); \ } \ bit_buf >>= (n); \ num_bits -= (n); \ } \ MZ_MACRO_END #define TINFL_GET_BITS(state_index, b, n) \ do { \ if (num_bits < (mz_uint)(n)) { \ TINFL_NEED_BITS(state_index, n); \ } \ b = bit_buf & ((1 << (n)) - 1); \ bit_buf >>= (n); \ num_bits -= (n); \ } \ MZ_MACRO_END // TINFL_HUFF_BITBUF_FILL() is only used rarely, when the number of bytes // remaining in the input buffer falls below 2. // It reads just enough bytes from the input stream that are needed to decode // the next Huffman code (and absolutely no more). It works by trying to fully // decode a // Huffman code by using whatever bits are currently present in the bit buffer. // If this fails, it reads another byte, and tries again until it succeeds or // until the // bit buffer contains >=15 bits (deflate's max. Huffman code size). 
#define TINFL_HUFF_BITBUF_FILL(state_index, pHuff) \ do { \ temp = (pHuff)->m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]; \ if (temp >= 0) { \ code_len = temp >> 9; \ if ((code_len) && (num_bits >= code_len)) break; \ } else if (num_bits > TINFL_FAST_LOOKUP_BITS) { \ code_len = TINFL_FAST_LOOKUP_BITS; \ do { \ temp = (pHuff)->m_tree[~temp + ((bit_buf >> code_len++) & 1)]; \ } while ((temp < 0) && (num_bits >= (code_len + 1))); \ if (temp >= 0) break; \ } \ TINFL_GET_BYTE(state_index, c); \ bit_buf |= (((tinfl_bit_buf_t)c) << num_bits); \ num_bits += 8; \ } while (num_bits < 15); // TINFL_HUFF_DECODE() decodes the next Huffman coded symbol. It's more complex // than you would initially expect because the zlib API expects the decompressor // to never read // beyond the final byte of the deflate stream. (In other words, when this macro // wants to read another byte from the input, it REALLY needs another byte in // order to fully // decode the next Huffman code.) Handling this properly is particularly // important on raw deflate (non-zlib) streams, which aren't followed by a byte // aligned adler-32. // The slow path is only executed at the very end of the input buffer. #define TINFL_HUFF_DECODE(state_index, sym, pHuff) \ do { \ int temp; \ mz_uint code_len, c; \ if (num_bits < 15) { \ if ((pIn_buf_end - pIn_buf_cur) < 2) { \ TINFL_HUFF_BITBUF_FILL(state_index, pHuff); \ } else { \ bit_buf |= (((tinfl_bit_buf_t)pIn_buf_cur[0]) << num_bits) | \ (((tinfl_bit_buf_t)pIn_buf_cur[1]) << (num_bits + 8)); \ pIn_buf_cur += 2; \ num_bits += 16; \ } \ } \ if ((temp = (pHuff)->m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= \ 0) \ code_len = temp >> 9, temp &= 511; \ else { \ code_len = TINFL_FAST_LOOKUP_BITS; \ do { \ temp = (pHuff)->m_tree[~temp + ((bit_buf >> code_len++) & 1)]; \ } while (temp < 0); \ } \ sym = temp; \ bit_buf >>= code_len; \ num_bits -= code_len; \ } \ MZ_MACRO_END tinfl_status tinfl_decompress(tinfl_decompressor *r, const mz_uint8 *pIn_buf_next, size_t *pIn_buf_size, mz_uint8 *pOut_buf_start, mz_uint8 *pOut_buf_next, size_t *pOut_buf_size, const mz_uint32 decomp_flags) { static const int s_length_base[31] = { 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31, 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0}; static const int s_length_extra[31] = {0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0, 0, 0}; static const int s_dist_base[32] = { 1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193, 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145, 8193, 12289, 16385, 24577, 0, 0}; static const int s_dist_extra[32] = {0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13}; static const mz_uint8 s_length_dezigzag[19] = { 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}; static const int s_min_table_sizes[3] = {257, 1, 4}; tinfl_status status = TINFL_STATUS_FAILED; mz_uint32 num_bits, dist, counter, num_extra; tinfl_bit_buf_t bit_buf; const mz_uint8 *pIn_buf_cur = pIn_buf_next, *const pIn_buf_end = pIn_buf_next + *pIn_buf_size; mz_uint8 *pOut_buf_cur = pOut_buf_next, *const pOut_buf_end = pOut_buf_next + *pOut_buf_size; size_t out_buf_size_mask = (decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF) ? 
(size_t)-1 : ((pOut_buf_next - pOut_buf_start) + *pOut_buf_size) - 1, dist_from_out_buf_start; // Ensure the output buffer's size is a power of 2, unless the output buffer // is large enough to hold the entire output file (in which case it doesn't // matter). if (((out_buf_size_mask + 1) & out_buf_size_mask) || (pOut_buf_next < pOut_buf_start)) { *pIn_buf_size = *pOut_buf_size = 0; return TINFL_STATUS_BAD_PARAM; } num_bits = r->m_num_bits; bit_buf = r->m_bit_buf; dist = r->m_dist; counter = r->m_counter; num_extra = r->m_num_extra; dist_from_out_buf_start = r->m_dist_from_out_buf_start; TINFL_CR_BEGIN bit_buf = num_bits = dist = counter = num_extra = r->m_zhdr0 = r->m_zhdr1 = 0; r->m_z_adler32 = r->m_check_adler32 = 1; if (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) { TINFL_GET_BYTE(1, r->m_zhdr0); TINFL_GET_BYTE(2, r->m_zhdr1); counter = (((r->m_zhdr0 * 256 + r->m_zhdr1) % 31 != 0) || (r->m_zhdr1 & 32) || ((r->m_zhdr0 & 15) != 8)); if (!(decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)) counter |= (((1U << (8U + (r->m_zhdr0 >> 4))) > 32768U) || ((out_buf_size_mask + 1) < (size_t)(1ULL << (8U + (r->m_zhdr0 >> 4))))); if (counter) { TINFL_CR_RETURN_FOREVER(36, TINFL_STATUS_FAILED); } } do { TINFL_GET_BITS(3, r->m_final, 3); r->m_type = r->m_final >> 1; if (r->m_type == 0) { TINFL_SKIP_BITS(5, num_bits & 7); for (counter = 0; counter < 4; ++counter) { if (num_bits) TINFL_GET_BITS(6, r->m_raw_header[counter], 8); else TINFL_GET_BYTE(7, r->m_raw_header[counter]); } if ((counter = (r->m_raw_header[0] | (r->m_raw_header[1] << 8))) != (mz_uint)(0xFFFF ^ (r->m_raw_header[2] | (r->m_raw_header[3] << 8)))) { TINFL_CR_RETURN_FOREVER(39, TINFL_STATUS_FAILED); } while ((counter) && (num_bits)) { TINFL_GET_BITS(51, dist, 8); while (pOut_buf_cur >= pOut_buf_end) { TINFL_CR_RETURN(52, TINFL_STATUS_HAS_MORE_OUTPUT); } *pOut_buf_cur++ = (mz_uint8)dist; counter--; } while (counter) { size_t n; while (pOut_buf_cur >= pOut_buf_end) { TINFL_CR_RETURN(9, TINFL_STATUS_HAS_MORE_OUTPUT); } while (pIn_buf_cur >= pIn_buf_end) { if (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) { TINFL_CR_RETURN(38, TINFL_STATUS_NEEDS_MORE_INPUT); } else { TINFL_CR_RETURN_FOREVER(40, TINFL_STATUS_FAILED); } } n = MZ_MIN(MZ_MIN((size_t)(pOut_buf_end - pOut_buf_cur), (size_t)(pIn_buf_end - pIn_buf_cur)), counter); TINFL_MEMCPY(pOut_buf_cur, pIn_buf_cur, n); pIn_buf_cur += n; pOut_buf_cur += n; counter -= (mz_uint)n; } } else if (r->m_type == 3) { TINFL_CR_RETURN_FOREVER(10, TINFL_STATUS_FAILED); } else { if (r->m_type == 1) { mz_uint8 *p = r->m_tables[0].m_code_size; mz_uint i; r->m_table_sizes[0] = 288; r->m_table_sizes[1] = 32; TINFL_MEMSET(r->m_tables[1].m_code_size, 5, 32); for (i = 0; i <= 143; ++i) *p++ = 8; for (; i <= 255; ++i) *p++ = 9; for (; i <= 279; ++i) *p++ = 7; for (; i <= 287; ++i) *p++ = 8; } else { for (counter = 0; counter < 3; counter++) { TINFL_GET_BITS(11, r->m_table_sizes[counter], "\05\05\04"[counter]); r->m_table_sizes[counter] += s_min_table_sizes[counter]; } MZ_CLEAR_OBJ(r->m_tables[2].m_code_size); for (counter = 0; counter < r->m_table_sizes[2]; counter++) { mz_uint s; TINFL_GET_BITS(14, s, 3); r->m_tables[2].m_code_size[s_length_dezigzag[counter]] = (mz_uint8)s; } r->m_table_sizes[2] = 19; } for (; (int)r->m_type >= 0; r->m_type--) { int tree_next, tree_cur; tinfl_huff_table *pTable; mz_uint i, j, used_syms, total, sym_index, next_code[17], total_syms[16]; pTable = &r->m_tables[r->m_type]; MZ_CLEAR_OBJ(total_syms); MZ_CLEAR_OBJ(pTable->m_look_up); MZ_CLEAR_OBJ(pTable->m_tree); for (i = 0; i < 
r->m_table_sizes[r->m_type]; ++i) total_syms[pTable->m_code_size[i]]++; used_syms = 0, total = 0; next_code[0] = next_code[1] = 0; for (i = 1; i <= 15; ++i) { used_syms += total_syms[i]; next_code[i + 1] = (total = ((total + total_syms[i]) << 1)); } if ((65536 != total) && (used_syms > 1)) { TINFL_CR_RETURN_FOREVER(35, TINFL_STATUS_FAILED); } for (tree_next = -1, sym_index = 0; sym_index < r->m_table_sizes[r->m_type]; ++sym_index) { mz_uint rev_code = 0, l, cur_code, code_size = pTable->m_code_size[sym_index]; if (!code_size) continue; cur_code = next_code[code_size]++; for (l = code_size; l > 0; l--, cur_code >>= 1) rev_code = (rev_code << 1) | (cur_code & 1); if (code_size <= TINFL_FAST_LOOKUP_BITS) { mz_int16 k = (mz_int16)((code_size << 9) | sym_index); while (rev_code < TINFL_FAST_LOOKUP_SIZE) { pTable->m_look_up[rev_code] = k; rev_code += (1 << code_size); } continue; } if (0 == (tree_cur = pTable->m_look_up[rev_code & (TINFL_FAST_LOOKUP_SIZE - 1)])) { pTable->m_look_up[rev_code & (TINFL_FAST_LOOKUP_SIZE - 1)] = (mz_int16)tree_next; tree_cur = tree_next; tree_next -= 2; } rev_code >>= (TINFL_FAST_LOOKUP_BITS - 1); for (j = code_size; j > (TINFL_FAST_LOOKUP_BITS + 1); j--) { tree_cur -= ((rev_code >>= 1) & 1); if (!pTable->m_tree[-tree_cur - 1]) { pTable->m_tree[-tree_cur - 1] = (mz_int16)tree_next; tree_cur = tree_next; tree_next -= 2; } else tree_cur = pTable->m_tree[-tree_cur - 1]; } tree_cur -= ((rev_code >>= 1) & 1); pTable->m_tree[-tree_cur - 1] = (mz_int16)sym_index; } if (r->m_type == 2) { for (counter = 0; counter < (r->m_table_sizes[0] + r->m_table_sizes[1]);) { mz_uint s; TINFL_HUFF_DECODE(16, dist, &r->m_tables[2]); if (dist < 16) { r->m_len_codes[counter++] = (mz_uint8)dist; continue; } if ((dist == 16) && (!counter)) { TINFL_CR_RETURN_FOREVER(17, TINFL_STATUS_FAILED); } num_extra = "\02\03\07"[dist - 16]; TINFL_GET_BITS(18, s, num_extra); s += "\03\03\013"[dist - 16]; TINFL_MEMSET(r->m_len_codes + counter, (dist == 16) ? 
r->m_len_codes[counter - 1] : 0, s); counter += s; } if ((r->m_table_sizes[0] + r->m_table_sizes[1]) != counter) { TINFL_CR_RETURN_FOREVER(21, TINFL_STATUS_FAILED); } TINFL_MEMCPY(r->m_tables[0].m_code_size, r->m_len_codes, r->m_table_sizes[0]); TINFL_MEMCPY(r->m_tables[1].m_code_size, r->m_len_codes + r->m_table_sizes[0], r->m_table_sizes[1]); } } for (;;) { mz_uint8 *pSrc; for (;;) { if (((pIn_buf_end - pIn_buf_cur) < 4) || ((pOut_buf_end - pOut_buf_cur) < 2)) { TINFL_HUFF_DECODE(23, counter, &r->m_tables[0]); if (counter >= 256) break; while (pOut_buf_cur >= pOut_buf_end) { TINFL_CR_RETURN(24, TINFL_STATUS_HAS_MORE_OUTPUT); } *pOut_buf_cur++ = (mz_uint8)counter; } else { int sym2; mz_uint code_len; #if TINFL_USE_64BIT_BITBUF if (num_bits < 30) { bit_buf |= (((tinfl_bit_buf_t)MZ_READ_LE32(pIn_buf_cur)) << num_bits); pIn_buf_cur += 4; num_bits += 32; } #else if (num_bits < 15) { bit_buf |= (((tinfl_bit_buf_t)MZ_READ_LE16(pIn_buf_cur)) << num_bits); pIn_buf_cur += 2; num_bits += 16; } #endif if ((sym2 = r->m_tables[0] .m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= 0) code_len = sym2 >> 9; else { code_len = TINFL_FAST_LOOKUP_BITS; do { sym2 = r->m_tables[0] .m_tree[~sym2 + ((bit_buf >> code_len++) & 1)]; } while (sym2 < 0); } counter = sym2; bit_buf >>= code_len; num_bits -= code_len; if (counter & 256) break; #if !TINFL_USE_64BIT_BITBUF if (num_bits < 15) { bit_buf |= (((tinfl_bit_buf_t)MZ_READ_LE16(pIn_buf_cur)) << num_bits); pIn_buf_cur += 2; num_bits += 16; } #endif if ((sym2 = r->m_tables[0] .m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= 0) code_len = sym2 >> 9; else { code_len = TINFL_FAST_LOOKUP_BITS; do { sym2 = r->m_tables[0] .m_tree[~sym2 + ((bit_buf >> code_len++) & 1)]; } while (sym2 < 0); } bit_buf >>= code_len; num_bits -= code_len; pOut_buf_cur[0] = (mz_uint8)counter; if (sym2 & 256) { pOut_buf_cur++; counter = sym2; break; } pOut_buf_cur[1] = (mz_uint8)sym2; pOut_buf_cur += 2; } } if ((counter &= 511) == 256) break; num_extra = s_length_extra[counter - 257]; counter = s_length_base[counter - 257]; if (num_extra) { mz_uint extra_bits; TINFL_GET_BITS(25, extra_bits, num_extra); counter += extra_bits; } TINFL_HUFF_DECODE(26, dist, &r->m_tables[1]); num_extra = s_dist_extra[dist]; dist = s_dist_base[dist]; if (num_extra) { mz_uint extra_bits; TINFL_GET_BITS(27, extra_bits, num_extra); dist += extra_bits; } dist_from_out_buf_start = pOut_buf_cur - pOut_buf_start; if ((dist > dist_from_out_buf_start) && (decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)) { TINFL_CR_RETURN_FOREVER(37, TINFL_STATUS_FAILED); } pSrc = pOut_buf_start + ((dist_from_out_buf_start - dist) & out_buf_size_mask); if ((MZ_MAX(pOut_buf_cur, pSrc) + counter) > pOut_buf_end) { while (counter--) { while (pOut_buf_cur >= pOut_buf_end) { TINFL_CR_RETURN(53, TINFL_STATUS_HAS_MORE_OUTPUT); } *pOut_buf_cur++ = pOut_buf_start[(dist_from_out_buf_start++ - dist) & out_buf_size_mask]; } continue; } #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES else if ((counter >= 9) && (counter <= dist)) { const mz_uint8 *pSrc_end = pSrc + (counter & ~7); do { ((mz_uint32 *)pOut_buf_cur)[0] = ((const mz_uint32 *)pSrc)[0]; ((mz_uint32 *)pOut_buf_cur)[1] = ((const mz_uint32 *)pSrc)[1]; pOut_buf_cur += 8; } while ((pSrc += 8) < pSrc_end); if ((counter &= 7) < 3) { if (counter) { pOut_buf_cur[0] = pSrc[0]; if (counter > 1) pOut_buf_cur[1] = pSrc[1]; pOut_buf_cur += counter; } continue; } } #endif do { pOut_buf_cur[0] = pSrc[0]; pOut_buf_cur[1] = pSrc[1]; pOut_buf_cur[2] = pSrc[2]; pOut_buf_cur += 3; pSrc += 3; } while 
((int)(counter -= 3) > 2); if ((int)counter > 0) { pOut_buf_cur[0] = pSrc[0]; if ((int)counter > 1) pOut_buf_cur[1] = pSrc[1]; pOut_buf_cur += counter; } } } } while (!(r->m_final & 1)); if (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) { TINFL_SKIP_BITS(32, num_bits & 7); for (counter = 0; counter < 4; ++counter) { mz_uint s; if (num_bits) TINFL_GET_BITS(41, s, 8); else TINFL_GET_BYTE(42, s); r->m_z_adler32 = (r->m_z_adler32 << 8) | s; } } TINFL_CR_RETURN_FOREVER(34, TINFL_STATUS_DONE); TINFL_CR_FINISH common_exit: r->m_num_bits = num_bits; r->m_bit_buf = bit_buf; r->m_dist = dist; r->m_counter = counter; r->m_num_extra = num_extra; r->m_dist_from_out_buf_start = dist_from_out_buf_start; *pIn_buf_size = pIn_buf_cur - pIn_buf_next; *pOut_buf_size = pOut_buf_cur - pOut_buf_next; if ((decomp_flags & (TINFL_FLAG_PARSE_ZLIB_HEADER | TINFL_FLAG_COMPUTE_ADLER32)) && (status >= 0)) { const mz_uint8 *ptr = pOut_buf_next; size_t buf_len = *pOut_buf_size; mz_uint32 i, s1 = r->m_check_adler32 & 0xffff, s2 = r->m_check_adler32 >> 16; size_t block_len = buf_len % 5552; while (buf_len) { for (i = 0; i + 7 < block_len; i += 8, ptr += 8) { s1 += ptr[0], s2 += s1; s1 += ptr[1], s2 += s1; s1 += ptr[2], s2 += s1; s1 += ptr[3], s2 += s1; s1 += ptr[4], s2 += s1; s1 += ptr[5], s2 += s1; s1 += ptr[6], s2 += s1; s1 += ptr[7], s2 += s1; } for (; i < block_len; ++i) s1 += *ptr++, s2 += s1; s1 %= 65521U, s2 %= 65521U; buf_len -= block_len; block_len = 5552; } r->m_check_adler32 = (s2 << 16) + s1; if ((status == TINFL_STATUS_DONE) && (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) && (r->m_check_adler32 != r->m_z_adler32)) status = TINFL_STATUS_ADLER32_MISMATCH; } return status; } // Higher level helper functions. void *tinfl_decompress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len, size_t *pOut_len, int flags) { tinfl_decompressor decomp; void *pBuf = NULL, *pNew_buf; size_t src_buf_ofs = 0, out_buf_capacity = 0; *pOut_len = 0; tinfl_init(&decomp); for (;;) { size_t src_buf_size = src_buf_len - src_buf_ofs, dst_buf_size = out_buf_capacity - *pOut_len, new_out_buf_capacity; tinfl_status status = tinfl_decompress( &decomp, (const mz_uint8 *)pSrc_buf + src_buf_ofs, &src_buf_size, (mz_uint8 *)pBuf, pBuf ? (mz_uint8 *)pBuf + *pOut_len : NULL, &dst_buf_size, (flags & ~TINFL_FLAG_HAS_MORE_INPUT) | TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF); if ((status < 0) || (status == TINFL_STATUS_NEEDS_MORE_INPUT)) { MZ_FREE(pBuf); *pOut_len = 0; return NULL; } src_buf_ofs += src_buf_size; *pOut_len += dst_buf_size; if (status == TINFL_STATUS_DONE) break; new_out_buf_capacity = out_buf_capacity * 2; if (new_out_buf_capacity < 128) new_out_buf_capacity = 128; pNew_buf = MZ_REALLOC(pBuf, new_out_buf_capacity); if (!pNew_buf) { MZ_FREE(pBuf); *pOut_len = 0; return NULL; } pBuf = pNew_buf; out_buf_capacity = new_out_buf_capacity; } return pBuf; } size_t tinfl_decompress_mem_to_mem(void *pOut_buf, size_t out_buf_len, const void *pSrc_buf, size_t src_buf_len, int flags) { tinfl_decompressor decomp; tinfl_status status; tinfl_init(&decomp); status = tinfl_decompress(&decomp, (const mz_uint8 *)pSrc_buf, &src_buf_len, (mz_uint8 *)pOut_buf, (mz_uint8 *)pOut_buf, &out_buf_len, (flags & ~TINFL_FLAG_HAS_MORE_INPUT) | TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF); return (status != TINFL_STATUS_DONE) ? 
TINFL_DECOMPRESS_MEM_TO_MEM_FAILED : out_buf_len; } int tinfl_decompress_mem_to_callback(const void *pIn_buf, size_t *pIn_buf_size, tinfl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags) { int result = 0; tinfl_decompressor decomp; mz_uint8 *pDict = (mz_uint8 *)MZ_MALLOC(TINFL_LZ_DICT_SIZE); size_t in_buf_ofs = 0, dict_ofs = 0; if (!pDict) return TINFL_STATUS_FAILED; tinfl_init(&decomp); for (;;) { size_t in_buf_size = *pIn_buf_size - in_buf_ofs, dst_buf_size = TINFL_LZ_DICT_SIZE - dict_ofs; tinfl_status status = tinfl_decompress(&decomp, (const mz_uint8 *)pIn_buf + in_buf_ofs, &in_buf_size, pDict, pDict + dict_ofs, &dst_buf_size, (flags & ~(TINFL_FLAG_HAS_MORE_INPUT | TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF))); in_buf_ofs += in_buf_size; if ((dst_buf_size) && (!(*pPut_buf_func)(pDict + dict_ofs, (int)dst_buf_size, pPut_buf_user))) break; if (status != TINFL_STATUS_HAS_MORE_OUTPUT) { result = (status == TINFL_STATUS_DONE); break; } dict_ofs = (dict_ofs + dst_buf_size) & (TINFL_LZ_DICT_SIZE - 1); } MZ_FREE(pDict); *pIn_buf_size = in_buf_ofs; return result; } // ------------------- Low-level Compression (independent from all decompression // API's) // Purposely making these tables static for faster init and thread safety. static const mz_uint16 s_tdefl_len_sym[256] = { 257, 258, 259, 260, 261, 262, 263, 264, 265, 265, 266, 266, 267, 267, 268, 268, 269, 269, 269, 269, 270, 270, 270, 270, 271, 271, 271, 271, 272, 272, 272, 272, 273, 273, 273, 273, 273, 273, 273, 273, 274, 274, 274, 274, 274, 274, 274, 274, 275, 275, 275, 275, 275, 275, 275, 275, 276, 276, 276, 276, 276, 276, 276, 276, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 285}; static const mz_uint8 s_tdefl_len_extra[256] = { 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 0}; static const mz_uint8 s_tdefl_small_dist_sym[512] = { 0, 1, 2, 3, 4, 4, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 
10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17}; static const mz_uint8 s_tdefl_small_dist_extra[512] = { 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7}; static const mz_uint8 
s_tdefl_large_dist_sym[128] = { 0, 0, 18, 19, 20, 20, 21, 21, 22, 22, 22, 22, 23, 23, 23, 23, 24, 24, 24, 24, 24, 24, 24, 24, 25, 25, 25, 25, 25, 25, 25, 25, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29}; static const mz_uint8 s_tdefl_large_dist_extra[128] = { 0, 0, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13}; // Radix sorts tdefl_sym_freq[] array by 16-bit key m_key. Returns ptr to sorted // values. typedef struct { mz_uint16 m_key, m_sym_index; } tdefl_sym_freq; static tdefl_sym_freq *tdefl_radix_sort_syms(mz_uint num_syms, tdefl_sym_freq *pSyms0, tdefl_sym_freq *pSyms1) { mz_uint32 total_passes = 2, pass_shift, pass, i, hist[256 * 2]; tdefl_sym_freq *pCur_syms = pSyms0, *pNew_syms = pSyms1; MZ_CLEAR_OBJ(hist); for (i = 0; i < num_syms; i++) { mz_uint freq = pSyms0[i].m_key; hist[freq & 0xFF]++; hist[256 + ((freq >> 8) & 0xFF)]++; } while ((total_passes > 1) && (num_syms == hist[(total_passes - 1) * 256])) total_passes--; for (pass_shift = 0, pass = 0; pass < total_passes; pass++, pass_shift += 8) { const mz_uint32 *pHist = &hist[pass << 8]; mz_uint offsets[256], cur_ofs = 0; for (i = 0; i < 256; i++) { offsets[i] = cur_ofs; cur_ofs += pHist[i]; } for (i = 0; i < num_syms; i++) pNew_syms[offsets[(pCur_syms[i].m_key >> pass_shift) & 0xFF]++] = pCur_syms[i]; { tdefl_sym_freq *t = pCur_syms; pCur_syms = pNew_syms; pNew_syms = t; } } return pCur_syms; } // tdefl_calculate_minimum_redundancy() originally written by: Alistair Moffat, // alistair@cs.mu.oz.au, Jyrki Katajainen, jyrki@diku.dk, November 1996. static void tdefl_calculate_minimum_redundancy(tdefl_sym_freq *A, int n) { int root, leaf, next, avbl, used, dpth; if (n == 0) return; else if (n == 1) { A[0].m_key = 1; return; } A[0].m_key += A[1].m_key; root = 0; leaf = 2; for (next = 1; next < n - 1; next++) { if (leaf >= n || A[root].m_key < A[leaf].m_key) { A[next].m_key = A[root].m_key; A[root++].m_key = (mz_uint16)next; } else A[next].m_key = A[leaf++].m_key; if (leaf >= n || (root < next && A[root].m_key < A[leaf].m_key)) { A[next].m_key = (mz_uint16)(A[next].m_key + A[root].m_key); A[root++].m_key = (mz_uint16)next; } else A[next].m_key = (mz_uint16)(A[next].m_key + A[leaf++].m_key); } A[n - 2].m_key = 0; for (next = n - 3; next >= 0; next--) A[next].m_key = A[A[next].m_key].m_key + 1; avbl = 1; used = dpth = 0; root = n - 2; next = n - 1; while (avbl > 0) { while (root >= 0 && (int)A[root].m_key == dpth) { used++; root--; } while (avbl > used) { A[next--].m_key = (mz_uint16)(dpth); avbl--; } avbl = 2 * used; dpth++; used = 0; } } // Limits canonical Huffman code table's max code size. 
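// In effect this is a Kraft-inequality repair pass: over-long codes are first
// clamped to the limit (which can push the weighted total
// sum(num_codes[i] << (limit - i)) above 1 << limit), then each iteration
// drops one code from the deepest level and splits one shallower code into
// two children, shrinking the total by exactly one until the lengths form a
// complete prefix code again.
//
// Illustrative sketch (not part of miniz, guarded out of the build): checking
// the invariant the pass restores, given counts-by-code-length in 'counts'.
#if 0
static int example_kraft_sum_is_exact(const int *counts, int max_len) {
  /* assumes 0 < max_len < 32 */
  mz_uint32 total = 0;
  int i;
  for (i = 1; i <= max_len; i++)
    total += ((mz_uint32)counts[i]) << (max_len - i);
  return total == (1UL << max_len); /* exactly full => a complete prefix code */
}
#endif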
enum { TDEFL_MAX_SUPPORTED_HUFF_CODESIZE = 32 }; static void tdefl_huffman_enforce_max_code_size(int *pNum_codes, int code_list_len, int max_code_size) { int i; mz_uint32 total = 0; if (code_list_len <= 1) return; for (i = max_code_size + 1; i <= TDEFL_MAX_SUPPORTED_HUFF_CODESIZE; i++) pNum_codes[max_code_size] += pNum_codes[i]; for (i = max_code_size; i > 0; i--) total += (((mz_uint32)pNum_codes[i]) << (max_code_size - i)); while (total != (1UL << max_code_size)) { pNum_codes[max_code_size]--; for (i = max_code_size - 1; i > 0; i--) if (pNum_codes[i]) { pNum_codes[i]--; pNum_codes[i + 1] += 2; break; } total--; } } static void tdefl_optimize_huffman_table(tdefl_compressor *d, int table_num, int table_len, int code_size_limit, int static_table) { int i, j, l, num_codes[1 + TDEFL_MAX_SUPPORTED_HUFF_CODESIZE]; mz_uint next_code[TDEFL_MAX_SUPPORTED_HUFF_CODESIZE + 1]; MZ_CLEAR_OBJ(num_codes); if (static_table) { for (i = 0; i < table_len; i++) num_codes[d->m_huff_code_sizes[table_num][i]]++; } else { tdefl_sym_freq syms0[TDEFL_MAX_HUFF_SYMBOLS], syms1[TDEFL_MAX_HUFF_SYMBOLS], *pSyms; int num_used_syms = 0; const mz_uint16 *pSym_count = &d->m_huff_count[table_num][0]; for (i = 0; i < table_len; i++) if (pSym_count[i]) { syms0[num_used_syms].m_key = (mz_uint16)pSym_count[i]; syms0[num_used_syms++].m_sym_index = (mz_uint16)i; } pSyms = tdefl_radix_sort_syms(num_used_syms, syms0, syms1); tdefl_calculate_minimum_redundancy(pSyms, num_used_syms); for (i = 0; i < num_used_syms; i++) num_codes[pSyms[i].m_key]++; tdefl_huffman_enforce_max_code_size(num_codes, num_used_syms, code_size_limit); MZ_CLEAR_OBJ(d->m_huff_code_sizes[table_num]); MZ_CLEAR_OBJ(d->m_huff_codes[table_num]); for (i = 1, j = num_used_syms; i <= code_size_limit; i++) for (l = num_codes[i]; l > 0; l--) d->m_huff_code_sizes[table_num][pSyms[--j].m_sym_index] = (mz_uint8)(i); } next_code[1] = 0; for (j = 0, i = 2; i <= code_size_limit; i++) next_code[i] = j = ((j + num_codes[i - 1]) << 1); for (i = 0; i < table_len; i++) { mz_uint rev_code = 0, code, code_size; if ((code_size = d->m_huff_code_sizes[table_num][i]) == 0) continue; code = next_code[code_size]++; for (l = code_size; l > 0; l--, code >>= 1) rev_code = (rev_code << 1) | (code & 1); d->m_huff_codes[table_num][i] = (mz_uint16)rev_code; } } #define TDEFL_PUT_BITS(b, l) \ do { \ mz_uint bits = b; \ mz_uint len = l; \ MZ_ASSERT(bits <= ((1U << len) - 1U)); \ d->m_bit_buffer |= (bits << d->m_bits_in); \ d->m_bits_in += len; \ while (d->m_bits_in >= 8) { \ if (d->m_pOutput_buf < d->m_pOutput_buf_end) \ *d->m_pOutput_buf++ = (mz_uint8)(d->m_bit_buffer); \ d->m_bit_buffer >>= 8; \ d->m_bits_in -= 8; \ } \ } \ MZ_MACRO_END #define TDEFL_RLE_PREV_CODE_SIZE() \ { \ if (rle_repeat_count) { \ if (rle_repeat_count < 3) { \ d->m_huff_count[2][prev_code_size] = (mz_uint16)( \ d->m_huff_count[2][prev_code_size] + rle_repeat_count); \ while (rle_repeat_count--) \ packed_code_sizes[num_packed_code_sizes++] = prev_code_size; \ } else { \ d->m_huff_count[2][16] = (mz_uint16)(d->m_huff_count[2][16] + 1); \ packed_code_sizes[num_packed_code_sizes++] = 16; \ packed_code_sizes[num_packed_code_sizes++] = \ (mz_uint8)(rle_repeat_count - 3); \ } \ rle_repeat_count = 0; \ } \ } #define TDEFL_RLE_ZERO_CODE_SIZE() \ { \ if (rle_z_count) { \ if (rle_z_count < 3) { \ d->m_huff_count[2][0] = \ (mz_uint16)(d->m_huff_count[2][0] + rle_z_count); \ while (rle_z_count--) packed_code_sizes[num_packed_code_sizes++] = 0; \ } else if (rle_z_count <= 10) { \ d->m_huff_count[2][17] = (mz_uint16)(d->m_huff_count[2][17] + 
1); \ packed_code_sizes[num_packed_code_sizes++] = 17; \ packed_code_sizes[num_packed_code_sizes++] = \ (mz_uint8)(rle_z_count - 3); \ } else { \ d->m_huff_count[2][18] = (mz_uint16)(d->m_huff_count[2][18] + 1); \ packed_code_sizes[num_packed_code_sizes++] = 18; \ packed_code_sizes[num_packed_code_sizes++] = \ (mz_uint8)(rle_z_count - 11); \ } \ rle_z_count = 0; \ } \ } static mz_uint8 s_tdefl_packed_code_size_syms_swizzle[] = { 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}; static void tdefl_start_dynamic_block(tdefl_compressor *d) { int num_lit_codes, num_dist_codes, num_bit_lengths; mz_uint i, total_code_sizes_to_pack, num_packed_code_sizes, rle_z_count, rle_repeat_count, packed_code_sizes_index; mz_uint8 code_sizes_to_pack[TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1], packed_code_sizes[TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1], prev_code_size = 0xFF; d->m_huff_count[0][256] = 1; tdefl_optimize_huffman_table(d, 0, TDEFL_MAX_HUFF_SYMBOLS_0, 15, MZ_FALSE); tdefl_optimize_huffman_table(d, 1, TDEFL_MAX_HUFF_SYMBOLS_1, 15, MZ_FALSE); for (num_lit_codes = 286; num_lit_codes > 257; num_lit_codes--) if (d->m_huff_code_sizes[0][num_lit_codes - 1]) break; for (num_dist_codes = 30; num_dist_codes > 1; num_dist_codes--) if (d->m_huff_code_sizes[1][num_dist_codes - 1]) break; memcpy(code_sizes_to_pack, &d->m_huff_code_sizes[0][0], num_lit_codes); memcpy(code_sizes_to_pack + num_lit_codes, &d->m_huff_code_sizes[1][0], num_dist_codes); total_code_sizes_to_pack = num_lit_codes + num_dist_codes; num_packed_code_sizes = 0; rle_z_count = 0; rle_repeat_count = 0; memset(&d->m_huff_count[2][0], 0, sizeof(d->m_huff_count[2][0]) * TDEFL_MAX_HUFF_SYMBOLS_2); for (i = 0; i < total_code_sizes_to_pack; i++) { mz_uint8 code_size = code_sizes_to_pack[i]; if (!code_size) { TDEFL_RLE_PREV_CODE_SIZE(); if (++rle_z_count == 138) { TDEFL_RLE_ZERO_CODE_SIZE(); } } else { TDEFL_RLE_ZERO_CODE_SIZE(); if (code_size != prev_code_size) { TDEFL_RLE_PREV_CODE_SIZE(); d->m_huff_count[2][code_size] = (mz_uint16)(d->m_huff_count[2][code_size] + 1); packed_code_sizes[num_packed_code_sizes++] = code_size; } else if (++rle_repeat_count == 6) { TDEFL_RLE_PREV_CODE_SIZE(); } } prev_code_size = code_size; } if (rle_repeat_count) { TDEFL_RLE_PREV_CODE_SIZE(); } else { TDEFL_RLE_ZERO_CODE_SIZE(); } tdefl_optimize_huffman_table(d, 2, TDEFL_MAX_HUFF_SYMBOLS_2, 7, MZ_FALSE); TDEFL_PUT_BITS(2, 2); TDEFL_PUT_BITS(num_lit_codes - 257, 5); TDEFL_PUT_BITS(num_dist_codes - 1, 5); for (num_bit_lengths = 18; num_bit_lengths >= 0; num_bit_lengths--) if (d->m_huff_code_sizes [2][s_tdefl_packed_code_size_syms_swizzle[num_bit_lengths]]) break; num_bit_lengths = MZ_MAX(4, (num_bit_lengths + 1)); TDEFL_PUT_BITS(num_bit_lengths - 4, 4); for (i = 0; (int)i < num_bit_lengths; i++) TDEFL_PUT_BITS( d->m_huff_code_sizes[2][s_tdefl_packed_code_size_syms_swizzle[i]], 3); for (packed_code_sizes_index = 0; packed_code_sizes_index < num_packed_code_sizes;) { mz_uint code = packed_code_sizes[packed_code_sizes_index++]; MZ_ASSERT(code < TDEFL_MAX_HUFF_SYMBOLS_2); TDEFL_PUT_BITS(d->m_huff_codes[2][code], d->m_huff_code_sizes[2][code]); if (code >= 16) TDEFL_PUT_BITS(packed_code_sizes[packed_code_sizes_index++], "\02\03\07"[code - 16]); } } static void tdefl_start_static_block(tdefl_compressor *d) { mz_uint i; mz_uint8 *p = &d->m_huff_code_sizes[0][0]; for (i = 0; i <= 143; ++i) *p++ = 8; for (; i <= 255; ++i) *p++ = 9; for (; i <= 279; ++i) *p++ = 7; for (; i <= 287; ++i) *p++ = 8; memset(d->m_huff_code_sizes[1], 5, 32); 
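  // (The lengths just assigned are the fixed Huffman codes from RFC 1951
  // section 3.2.6: literals 0-143 get 8 bits, 144-255 get 9, length codes
  // 256-279 get 7, 280-287 get 8, and all 32 distance codes get 5 bits.)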
tdefl_optimize_huffman_table(d, 0, 288, 15, MZ_TRUE); tdefl_optimize_huffman_table(d, 1, 32, 15, MZ_TRUE); TDEFL_PUT_BITS(1, 2); } static const mz_uint mz_bitmasks[17] = { 0x0000, 0x0001, 0x0003, 0x0007, 0x000F, 0x001F, 0x003F, 0x007F, 0x00FF, 0x01FF, 0x03FF, 0x07FF, 0x0FFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF}; #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN && \ MINIZ_HAS_64BIT_REGISTERS static mz_bool tdefl_compress_lz_codes(tdefl_compressor *d) { mz_uint flags; mz_uint8 *pLZ_codes; mz_uint8 *pOutput_buf = d->m_pOutput_buf; mz_uint8 *pLZ_code_buf_end = d->m_pLZ_code_buf; mz_uint64 bit_buffer = d->m_bit_buffer; mz_uint bits_in = d->m_bits_in; #define TDEFL_PUT_BITS_FAST(b, l) \ { \ bit_buffer |= (((mz_uint64)(b)) << bits_in); \ bits_in += (l); \ } flags = 1; for (pLZ_codes = d->m_lz_code_buf; pLZ_codes < pLZ_code_buf_end; flags >>= 1) { if (flags == 1) flags = *pLZ_codes++ | 0x100; if (flags & 1) { mz_uint s0, s1, n0, n1, sym, num_extra_bits; mz_uint match_len = pLZ_codes[0], match_dist = *(const mz_uint16 *)(pLZ_codes + 1); pLZ_codes += 3; MZ_ASSERT(d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]); TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][s_tdefl_len_sym[match_len]], d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]); TDEFL_PUT_BITS_FAST(match_len & mz_bitmasks[s_tdefl_len_extra[match_len]], s_tdefl_len_extra[match_len]); // This sequence coaxes MSVC into using cmov's vs. jmp's. s0 = s_tdefl_small_dist_sym[match_dist & 511]; n0 = s_tdefl_small_dist_extra[match_dist & 511]; s1 = s_tdefl_large_dist_sym[match_dist >> 8]; n1 = s_tdefl_large_dist_extra[match_dist >> 8]; sym = (match_dist < 512) ? s0 : s1; num_extra_bits = (match_dist < 512) ? n0 : n1; MZ_ASSERT(d->m_huff_code_sizes[1][sym]); TDEFL_PUT_BITS_FAST(d->m_huff_codes[1][sym], d->m_huff_code_sizes[1][sym]); TDEFL_PUT_BITS_FAST(match_dist & mz_bitmasks[num_extra_bits], num_extra_bits); } else { mz_uint lit = *pLZ_codes++; MZ_ASSERT(d->m_huff_code_sizes[0][lit]); TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]); if (((flags & 2) == 0) && (pLZ_codes < pLZ_code_buf_end)) { flags >>= 1; lit = *pLZ_codes++; MZ_ASSERT(d->m_huff_code_sizes[0][lit]); TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]); if (((flags & 2) == 0) && (pLZ_codes < pLZ_code_buf_end)) { flags >>= 1; lit = *pLZ_codes++; MZ_ASSERT(d->m_huff_code_sizes[0][lit]); TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]); } } } if (pOutput_buf >= d->m_pOutput_buf_end) return MZ_FALSE; *(mz_uint64 *)pOutput_buf = bit_buffer; pOutput_buf += (bits_in >> 3); bit_buffer >>= (bits_in & ~7); bits_in &= 7; } #undef TDEFL_PUT_BITS_FAST d->m_pOutput_buf = pOutput_buf; d->m_bits_in = 0; d->m_bit_buffer = 0; while (bits_in) { mz_uint32 n = MZ_MIN(bits_in, 16); TDEFL_PUT_BITS((mz_uint)bit_buffer & mz_bitmasks[n], n); bit_buffer >>= n; bits_in -= n; } TDEFL_PUT_BITS(d->m_huff_codes[0][256], d->m_huff_code_sizes[0][256]); return (d->m_pOutput_buf < d->m_pOutput_buf_end); } #else static mz_bool tdefl_compress_lz_codes(tdefl_compressor *d) { mz_uint flags; mz_uint8 *pLZ_codes; flags = 1; for (pLZ_codes = d->m_lz_code_buf; pLZ_codes < d->m_pLZ_code_buf; flags >>= 1) { if (flags == 1) flags = *pLZ_codes++ | 0x100; if (flags & 1) { mz_uint sym, num_extra_bits; mz_uint match_len = pLZ_codes[0], match_dist = (pLZ_codes[1] | (pLZ_codes[2] << 8)); pLZ_codes += 3; MZ_ASSERT(d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]); TDEFL_PUT_BITS(d->m_huff_codes[0][s_tdefl_len_sym[match_len]], 
d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]); TDEFL_PUT_BITS(match_len & mz_bitmasks[s_tdefl_len_extra[match_len]], s_tdefl_len_extra[match_len]); if (match_dist < 512) { sym = s_tdefl_small_dist_sym[match_dist]; num_extra_bits = s_tdefl_small_dist_extra[match_dist]; } else { sym = s_tdefl_large_dist_sym[match_dist >> 8]; num_extra_bits = s_tdefl_large_dist_extra[match_dist >> 8]; } MZ_ASSERT(d->m_huff_code_sizes[1][sym]); TDEFL_PUT_BITS(d->m_huff_codes[1][sym], d->m_huff_code_sizes[1][sym]); TDEFL_PUT_BITS(match_dist & mz_bitmasks[num_extra_bits], num_extra_bits); } else { mz_uint lit = *pLZ_codes++; MZ_ASSERT(d->m_huff_code_sizes[0][lit]); TDEFL_PUT_BITS(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]); } } TDEFL_PUT_BITS(d->m_huff_codes[0][256], d->m_huff_code_sizes[0][256]); return (d->m_pOutput_buf < d->m_pOutput_buf_end); } #endif // MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN && // MINIZ_HAS_64BIT_REGISTERS static mz_bool tdefl_compress_block(tdefl_compressor *d, mz_bool static_block) { if (static_block) tdefl_start_static_block(d); else tdefl_start_dynamic_block(d); return tdefl_compress_lz_codes(d); } static int tdefl_flush_block(tdefl_compressor *d, int flush) { mz_uint saved_bit_buf, saved_bits_in; mz_uint8 *pSaved_output_buf; mz_bool comp_block_succeeded = MZ_FALSE; int n, use_raw_block = ((d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS) != 0) && (d->m_lookahead_pos - d->m_lz_code_buf_dict_pos) <= d->m_dict_size; mz_uint8 *pOutput_buf_start = ((d->m_pPut_buf_func == NULL) && ((*d->m_pOut_buf_size - d->m_out_buf_ofs) >= TDEFL_OUT_BUF_SIZE)) ? ((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs) : d->m_output_buf; d->m_pOutput_buf = pOutput_buf_start; d->m_pOutput_buf_end = d->m_pOutput_buf + TDEFL_OUT_BUF_SIZE - 16; MZ_ASSERT(!d->m_output_flush_remaining); d->m_output_flush_ofs = 0; d->m_output_flush_remaining = 0; *d->m_pLZ_flags = (mz_uint8)(*d->m_pLZ_flags >> d->m_num_flags_left); d->m_pLZ_code_buf -= (d->m_num_flags_left == 8); if ((d->m_flags & TDEFL_WRITE_ZLIB_HEADER) && (!d->m_block_index)) { TDEFL_PUT_BITS(0x78, 8); TDEFL_PUT_BITS(0x01, 8); } TDEFL_PUT_BITS(flush == TDEFL_FINISH, 1); pSaved_output_buf = d->m_pOutput_buf; saved_bit_buf = d->m_bit_buffer; saved_bits_in = d->m_bits_in; if (!use_raw_block) comp_block_succeeded = tdefl_compress_block(d, (d->m_flags & TDEFL_FORCE_ALL_STATIC_BLOCKS) || (d->m_total_lz_bytes < 48)); // If the block gets expanded, forget the current contents of the output // buffer and send a raw block instead. if (((use_raw_block) || ((d->m_total_lz_bytes) && ((d->m_pOutput_buf - pSaved_output_buf + 1U) >= d->m_total_lz_bytes))) && ((d->m_lookahead_pos - d->m_lz_code_buf_dict_pos) <= d->m_dict_size)) { mz_uint i; d->m_pOutput_buf = pSaved_output_buf; d->m_bit_buffer = saved_bit_buf, d->m_bits_in = saved_bits_in; TDEFL_PUT_BITS(0, 2); if (d->m_bits_in) { TDEFL_PUT_BITS(0, 8 - d->m_bits_in); } for (i = 2; i; --i, d->m_total_lz_bytes ^= 0xFFFF) { TDEFL_PUT_BITS(d->m_total_lz_bytes & 0xFFFF, 16); } for (i = 0; i < d->m_total_lz_bytes; ++i) { TDEFL_PUT_BITS( d->m_dict[(d->m_lz_code_buf_dict_pos + i) & TDEFL_LZ_DICT_SIZE_MASK], 8); } } // Check for the extremely unlikely (if not impossible) case of the compressed // block not fitting into the output buffer when using dynamic codes. 
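  // (The fallback re-emits the same LZ codes through the fixed RFC 1951
  // tables via tdefl_compress_block(d, MZ_TRUE), whose worst-case output
  // size is known up front, instead of failing the flush.)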
else if (!comp_block_succeeded) { d->m_pOutput_buf = pSaved_output_buf; d->m_bit_buffer = saved_bit_buf, d->m_bits_in = saved_bits_in; tdefl_compress_block(d, MZ_TRUE); } if (flush) { if (flush == TDEFL_FINISH) { if (d->m_bits_in) { TDEFL_PUT_BITS(0, 8 - d->m_bits_in); } if (d->m_flags & TDEFL_WRITE_ZLIB_HEADER) { mz_uint i, a = d->m_adler32; for (i = 0; i < 4; i++) { TDEFL_PUT_BITS((a >> 24) & 0xFF, 8); a <<= 8; } } } else { mz_uint i, z = 0; TDEFL_PUT_BITS(0, 3); if (d->m_bits_in) { TDEFL_PUT_BITS(0, 8 - d->m_bits_in); } for (i = 2; i; --i, z ^= 0xFFFF) { TDEFL_PUT_BITS(z & 0xFFFF, 16); } } } MZ_ASSERT(d->m_pOutput_buf < d->m_pOutput_buf_end); memset(&d->m_huff_count[0][0], 0, sizeof(d->m_huff_count[0][0]) * TDEFL_MAX_HUFF_SYMBOLS_0); memset(&d->m_huff_count[1][0], 0, sizeof(d->m_huff_count[1][0]) * TDEFL_MAX_HUFF_SYMBOLS_1); d->m_pLZ_code_buf = d->m_lz_code_buf + 1; d->m_pLZ_flags = d->m_lz_code_buf; d->m_num_flags_left = 8; d->m_lz_code_buf_dict_pos += d->m_total_lz_bytes; d->m_total_lz_bytes = 0; d->m_block_index++; if ((n = (int)(d->m_pOutput_buf - pOutput_buf_start)) != 0) { if (d->m_pPut_buf_func) { *d->m_pIn_buf_size = d->m_pSrc - (const mz_uint8 *)d->m_pIn_buf; if (!(*d->m_pPut_buf_func)(d->m_output_buf, n, d->m_pPut_buf_user)) return (d->m_prev_return_status = TDEFL_STATUS_PUT_BUF_FAILED); } else if (pOutput_buf_start == d->m_output_buf) { int bytes_to_copy = (int)MZ_MIN( (size_t)n, (size_t)(*d->m_pOut_buf_size - d->m_out_buf_ofs)); memcpy((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs, d->m_output_buf, bytes_to_copy); d->m_out_buf_ofs += bytes_to_copy; if ((n -= bytes_to_copy) != 0) { d->m_output_flush_ofs = bytes_to_copy; d->m_output_flush_remaining = n; } } else { d->m_out_buf_ofs += n; } } return d->m_output_flush_remaining; } #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES #define TDEFL_READ_UNALIGNED_WORD(p) *(const mz_uint16 *)(p) static MZ_FORCEINLINE void tdefl_find_match( tdefl_compressor *d, mz_uint lookahead_pos, mz_uint max_dist, mz_uint max_match_len, mz_uint *pMatch_dist, mz_uint *pMatch_len) { mz_uint dist, pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK, match_len = *pMatch_len, probe_pos = pos, next_probe_pos, probe_len; mz_uint num_probes_left = d->m_max_probes[match_len >= 32]; const mz_uint16 *s = (const mz_uint16 *)(d->m_dict + pos), *p, *q; mz_uint16 c01 = TDEFL_READ_UNALIGNED_WORD(&d->m_dict[pos + match_len - 1]), s01 = TDEFL_READ_UNALIGNED_WORD(s); MZ_ASSERT(max_match_len <= TDEFL_MAX_MATCH_LEN); if (max_match_len <= match_len) return; for (;;) { for (;;) { if (--num_probes_left == 0) return; #define TDEFL_PROBE \ next_probe_pos = d->m_next[probe_pos]; \ if ((!next_probe_pos) || \ ((dist = (mz_uint16)(lookahead_pos - next_probe_pos)) > max_dist)) \ return; \ probe_pos = next_probe_pos & TDEFL_LZ_DICT_SIZE_MASK; \ if (TDEFL_READ_UNALIGNED_WORD(&d->m_dict[probe_pos + match_len - 1]) == c01) \ break; TDEFL_PROBE; TDEFL_PROBE; TDEFL_PROBE; } if (!dist) break; q = (const mz_uint16 *)(d->m_dict + probe_pos); if (TDEFL_READ_UNALIGNED_WORD(q) != s01) continue; p = s; probe_len = 32; do { } while ( (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (--probe_len > 0)); if (!probe_len) { *pMatch_dist = dist; *pMatch_len = MZ_MIN(max_match_len, TDEFL_MAX_MATCH_LEN); break; } else if ((probe_len = ((mz_uint)(p - s) * 2) + (mz_uint)(*(const mz_uint8 *)p 
== *(const mz_uint8 *)q)) > match_len) { *pMatch_dist = dist; if ((*pMatch_len = match_len = MZ_MIN(max_match_len, probe_len)) == max_match_len) break; c01 = TDEFL_READ_UNALIGNED_WORD(&d->m_dict[pos + match_len - 1]); } } } #else static MZ_FORCEINLINE void tdefl_find_match( tdefl_compressor *d, mz_uint lookahead_pos, mz_uint max_dist, mz_uint max_match_len, mz_uint *pMatch_dist, mz_uint *pMatch_len) { mz_uint dist, pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK, match_len = *pMatch_len, probe_pos = pos, next_probe_pos, probe_len; mz_uint num_probes_left = d->m_max_probes[match_len >= 32]; const mz_uint8 *s = d->m_dict + pos, *p, *q; mz_uint8 c0 = d->m_dict[pos + match_len], c1 = d->m_dict[pos + match_len - 1]; MZ_ASSERT(max_match_len <= TDEFL_MAX_MATCH_LEN); if (max_match_len <= match_len) return; for (;;) { for (;;) { if (--num_probes_left == 0) return; #define TDEFL_PROBE \ next_probe_pos = d->m_next[probe_pos]; \ if ((!next_probe_pos) || \ ((dist = (mz_uint16)(lookahead_pos - next_probe_pos)) > max_dist)) \ return; \ probe_pos = next_probe_pos & TDEFL_LZ_DICT_SIZE_MASK; \ if ((d->m_dict[probe_pos + match_len] == c0) && \ (d->m_dict[probe_pos + match_len - 1] == c1)) \ break; TDEFL_PROBE; TDEFL_PROBE; TDEFL_PROBE; } if (!dist) break; p = s; q = d->m_dict + probe_pos; for (probe_len = 0; probe_len < max_match_len; probe_len++) if (*p++ != *q++) break; if (probe_len > match_len) { *pMatch_dist = dist; if ((*pMatch_len = match_len = probe_len) == max_match_len) return; c0 = d->m_dict[pos + match_len]; c1 = d->m_dict[pos + match_len - 1]; } } } #endif // #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN static mz_bool tdefl_compress_fast(tdefl_compressor *d) { // Faster, minimally featured LZRW1-style match+parse loop with better // register utilization. Intended for applications where raw throughput is // valued more highly than ratio. 
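  // The parse below hashes the next 3 bytes (one trigram) into a small
  // level-1 table and follows exactly one probe per position, trading match
  // quality for speed; when the single probe misses, the byte is emitted as
  // a literal.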
mz_uint lookahead_pos = d->m_lookahead_pos, lookahead_size = d->m_lookahead_size, dict_size = d->m_dict_size, total_lz_bytes = d->m_total_lz_bytes, num_flags_left = d->m_num_flags_left; mz_uint8 *pLZ_code_buf = d->m_pLZ_code_buf, *pLZ_flags = d->m_pLZ_flags; mz_uint cur_pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK; while ((d->m_src_buf_left) || ((d->m_flush) && (lookahead_size))) { const mz_uint TDEFL_COMP_FAST_LOOKAHEAD_SIZE = 4096; mz_uint dst_pos = (lookahead_pos + lookahead_size) & TDEFL_LZ_DICT_SIZE_MASK; mz_uint num_bytes_to_process = (mz_uint)MZ_MIN( d->m_src_buf_left, TDEFL_COMP_FAST_LOOKAHEAD_SIZE - lookahead_size); d->m_src_buf_left -= num_bytes_to_process; lookahead_size += num_bytes_to_process; while (num_bytes_to_process) { mz_uint32 n = MZ_MIN(TDEFL_LZ_DICT_SIZE - dst_pos, num_bytes_to_process); memcpy(d->m_dict + dst_pos, d->m_pSrc, n); if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1)) memcpy(d->m_dict + TDEFL_LZ_DICT_SIZE + dst_pos, d->m_pSrc, MZ_MIN(n, (TDEFL_MAX_MATCH_LEN - 1) - dst_pos)); d->m_pSrc += n; dst_pos = (dst_pos + n) & TDEFL_LZ_DICT_SIZE_MASK; num_bytes_to_process -= n; } dict_size = MZ_MIN(TDEFL_LZ_DICT_SIZE - lookahead_size, dict_size); if ((!d->m_flush) && (lookahead_size < TDEFL_COMP_FAST_LOOKAHEAD_SIZE)) break; while (lookahead_size >= 4) { mz_uint cur_match_dist, cur_match_len = 1; mz_uint8 *pCur_dict = d->m_dict + cur_pos; mz_uint first_trigram = (*(const mz_uint32 *)pCur_dict) & 0xFFFFFF; mz_uint hash = (first_trigram ^ (first_trigram >> (24 - (TDEFL_LZ_HASH_BITS - 8)))) & TDEFL_LEVEL1_HASH_SIZE_MASK; mz_uint probe_pos = d->m_hash[hash]; d->m_hash[hash] = (mz_uint16)lookahead_pos; if (((cur_match_dist = (mz_uint16)(lookahead_pos - probe_pos)) <= dict_size) && ((*(const mz_uint32 *)(d->m_dict + (probe_pos &= TDEFL_LZ_DICT_SIZE_MASK)) & 0xFFFFFF) == first_trigram)) { const mz_uint16 *p = (const mz_uint16 *)pCur_dict; const mz_uint16 *q = (const mz_uint16 *)(d->m_dict + probe_pos); mz_uint32 probe_len = 32; do { } while ((TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (--probe_len > 0)); cur_match_len = ((mz_uint)(p - (const mz_uint16 *)pCur_dict) * 2) + (mz_uint)(*(const mz_uint8 *)p == *(const mz_uint8 *)q); if (!probe_len) cur_match_len = cur_match_dist ? TDEFL_MAX_MATCH_LEN : 0; if ((cur_match_len < TDEFL_MIN_MATCH_LEN) || ((cur_match_len == TDEFL_MIN_MATCH_LEN) && (cur_match_dist >= 8U * 1024U))) { cur_match_len = 1; *pLZ_code_buf++ = (mz_uint8)first_trigram; *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1); d->m_huff_count[0][(mz_uint8)first_trigram]++; } else { mz_uint32 s0, s1; cur_match_len = MZ_MIN(cur_match_len, lookahead_size); MZ_ASSERT((cur_match_len >= TDEFL_MIN_MATCH_LEN) && (cur_match_dist >= 1) && (cur_match_dist <= TDEFL_LZ_DICT_SIZE)); cur_match_dist--; pLZ_code_buf[0] = (mz_uint8)(cur_match_len - TDEFL_MIN_MATCH_LEN); *(mz_uint16 *)(&pLZ_code_buf[1]) = (mz_uint16)cur_match_dist; pLZ_code_buf += 3; *pLZ_flags = (mz_uint8)((*pLZ_flags >> 1) | 0x80); s0 = s_tdefl_small_dist_sym[cur_match_dist & 511]; s1 = s_tdefl_large_dist_sym[cur_match_dist >> 8]; d->m_huff_count[1][(cur_match_dist < 512) ? 
s0 : s1]++; d->m_huff_count[0][s_tdefl_len_sym[cur_match_len - TDEFL_MIN_MATCH_LEN]]++; } } else { *pLZ_code_buf++ = (mz_uint8)first_trigram; *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1); d->m_huff_count[0][(mz_uint8)first_trigram]++; } if (--num_flags_left == 0) { num_flags_left = 8; pLZ_flags = pLZ_code_buf++; } total_lz_bytes += cur_match_len; lookahead_pos += cur_match_len; dict_size = MZ_MIN(dict_size + cur_match_len, TDEFL_LZ_DICT_SIZE); cur_pos = (cur_pos + cur_match_len) & TDEFL_LZ_DICT_SIZE_MASK; MZ_ASSERT(lookahead_size >= cur_match_len); lookahead_size -= cur_match_len; if (pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) { int n; d->m_lookahead_pos = lookahead_pos; d->m_lookahead_size = lookahead_size; d->m_dict_size = dict_size; d->m_total_lz_bytes = total_lz_bytes; d->m_pLZ_code_buf = pLZ_code_buf; d->m_pLZ_flags = pLZ_flags; d->m_num_flags_left = num_flags_left; if ((n = tdefl_flush_block(d, 0)) != 0) return (n < 0) ? MZ_FALSE : MZ_TRUE; total_lz_bytes = d->m_total_lz_bytes; pLZ_code_buf = d->m_pLZ_code_buf; pLZ_flags = d->m_pLZ_flags; num_flags_left = d->m_num_flags_left; } } while (lookahead_size) { mz_uint8 lit = d->m_dict[cur_pos]; total_lz_bytes++; *pLZ_code_buf++ = lit; *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1); if (--num_flags_left == 0) { num_flags_left = 8; pLZ_flags = pLZ_code_buf++; } d->m_huff_count[0][lit]++; lookahead_pos++; dict_size = MZ_MIN(dict_size + 1, TDEFL_LZ_DICT_SIZE); cur_pos = (cur_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK; lookahead_size--; if (pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) { int n; d->m_lookahead_pos = lookahead_pos; d->m_lookahead_size = lookahead_size; d->m_dict_size = dict_size; d->m_total_lz_bytes = total_lz_bytes; d->m_pLZ_code_buf = pLZ_code_buf; d->m_pLZ_flags = pLZ_flags; d->m_num_flags_left = num_flags_left; if ((n = tdefl_flush_block(d, 0)) != 0) return (n < 0) ? MZ_FALSE : MZ_TRUE; total_lz_bytes = d->m_total_lz_bytes; pLZ_code_buf = d->m_pLZ_code_buf; pLZ_flags = d->m_pLZ_flags; num_flags_left = d->m_num_flags_left; } } } d->m_lookahead_pos = lookahead_pos; d->m_lookahead_size = lookahead_size; d->m_dict_size = dict_size; d->m_total_lz_bytes = total_lz_bytes; d->m_pLZ_code_buf = pLZ_code_buf; d->m_pLZ_flags = pLZ_flags; d->m_num_flags_left = num_flags_left; return MZ_TRUE; } #endif // MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN static MZ_FORCEINLINE void tdefl_record_literal(tdefl_compressor *d, mz_uint8 lit) { d->m_total_lz_bytes++; *d->m_pLZ_code_buf++ = lit; *d->m_pLZ_flags = (mz_uint8)(*d->m_pLZ_flags >> 1); if (--d->m_num_flags_left == 0) { d->m_num_flags_left = 8; d->m_pLZ_flags = d->m_pLZ_code_buf++; } d->m_huff_count[0][lit]++; } static MZ_FORCEINLINE void tdefl_record_match(tdefl_compressor *d, mz_uint match_len, mz_uint match_dist) { mz_uint32 s0, s1; MZ_ASSERT((match_len >= TDEFL_MIN_MATCH_LEN) && (match_dist >= 1) && (match_dist <= TDEFL_LZ_DICT_SIZE)); d->m_total_lz_bytes += match_len; d->m_pLZ_code_buf[0] = (mz_uint8)(match_len - TDEFL_MIN_MATCH_LEN); match_dist -= 1; d->m_pLZ_code_buf[1] = (mz_uint8)(match_dist & 0xFF); d->m_pLZ_code_buf[2] = (mz_uint8)(match_dist >> 8); d->m_pLZ_code_buf += 3; *d->m_pLZ_flags = (mz_uint8)((*d->m_pLZ_flags >> 1) | 0x80); if (--d->m_num_flags_left == 0) { d->m_num_flags_left = 8; d->m_pLZ_flags = d->m_pLZ_code_buf++; } s0 = s_tdefl_small_dist_sym[match_dist & 511]; s1 = s_tdefl_large_dist_sym[(match_dist >> 8) & 127]; d->m_huff_count[1][(match_dist < 512) ? 
s0 : s1]++; if (match_len >= TDEFL_MIN_MATCH_LEN) d->m_huff_count[0][s_tdefl_len_sym[match_len - TDEFL_MIN_MATCH_LEN]]++; } static mz_bool tdefl_compress_normal(tdefl_compressor *d) { const mz_uint8 *pSrc = d->m_pSrc; size_t src_buf_left = d->m_src_buf_left; tdefl_flush flush = d->m_flush; while ((src_buf_left) || ((flush) && (d->m_lookahead_size))) { mz_uint len_to_move, cur_match_dist, cur_match_len, cur_pos; // Update dictionary and hash chains. Keeps the lookahead size equal to // TDEFL_MAX_MATCH_LEN. if ((d->m_lookahead_size + d->m_dict_size) >= (TDEFL_MIN_MATCH_LEN - 1)) { mz_uint dst_pos = (d->m_lookahead_pos + d->m_lookahead_size) & TDEFL_LZ_DICT_SIZE_MASK, ins_pos = d->m_lookahead_pos + d->m_lookahead_size - 2; mz_uint hash = (d->m_dict[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] << TDEFL_LZ_HASH_SHIFT) ^ d->m_dict[(ins_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK]; mz_uint num_bytes_to_process = (mz_uint)MZ_MIN( src_buf_left, TDEFL_MAX_MATCH_LEN - d->m_lookahead_size); const mz_uint8 *pSrc_end = pSrc + num_bytes_to_process; src_buf_left -= num_bytes_to_process; d->m_lookahead_size += num_bytes_to_process; while (pSrc != pSrc_end) { mz_uint8 c = *pSrc++; d->m_dict[dst_pos] = c; if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1)) d->m_dict[TDEFL_LZ_DICT_SIZE + dst_pos] = c; hash = ((hash << TDEFL_LZ_HASH_SHIFT) ^ c) & (TDEFL_LZ_HASH_SIZE - 1); d->m_next[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] = d->m_hash[hash]; d->m_hash[hash] = (mz_uint16)(ins_pos); dst_pos = (dst_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK; ins_pos++; } } else { while ((src_buf_left) && (d->m_lookahead_size < TDEFL_MAX_MATCH_LEN)) { mz_uint8 c = *pSrc++; mz_uint dst_pos = (d->m_lookahead_pos + d->m_lookahead_size) & TDEFL_LZ_DICT_SIZE_MASK; src_buf_left--; d->m_dict[dst_pos] = c; if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1)) d->m_dict[TDEFL_LZ_DICT_SIZE + dst_pos] = c; if ((++d->m_lookahead_size + d->m_dict_size) >= TDEFL_MIN_MATCH_LEN) { mz_uint ins_pos = d->m_lookahead_pos + (d->m_lookahead_size - 1) - 2; mz_uint hash = ((d->m_dict[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] << (TDEFL_LZ_HASH_SHIFT * 2)) ^ (d->m_dict[(ins_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK] << TDEFL_LZ_HASH_SHIFT) ^ c) & (TDEFL_LZ_HASH_SIZE - 1); d->m_next[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] = d->m_hash[hash]; d->m_hash[hash] = (mz_uint16)(ins_pos); } } } d->m_dict_size = MZ_MIN(TDEFL_LZ_DICT_SIZE - d->m_lookahead_size, d->m_dict_size); if ((!flush) && (d->m_lookahead_size < TDEFL_MAX_MATCH_LEN)) break; // Simple lazy/greedy parsing state machine. len_to_move = 1; cur_match_dist = 0; cur_match_len = d->m_saved_match_len ? 
d->m_saved_match_len : (TDEFL_MIN_MATCH_LEN - 1); cur_pos = d->m_lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK; if (d->m_flags & (TDEFL_RLE_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS)) { if ((d->m_dict_size) && (!(d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS))) { mz_uint8 c = d->m_dict[(cur_pos - 1) & TDEFL_LZ_DICT_SIZE_MASK]; cur_match_len = 0; while (cur_match_len < d->m_lookahead_size) { if (d->m_dict[cur_pos + cur_match_len] != c) break; cur_match_len++; } if (cur_match_len < TDEFL_MIN_MATCH_LEN) cur_match_len = 0; else cur_match_dist = 1; } } else { tdefl_find_match(d, d->m_lookahead_pos, d->m_dict_size, d->m_lookahead_size, &cur_match_dist, &cur_match_len); } if (((cur_match_len == TDEFL_MIN_MATCH_LEN) && (cur_match_dist >= 8U * 1024U)) || (cur_pos == cur_match_dist) || ((d->m_flags & TDEFL_FILTER_MATCHES) && (cur_match_len <= 5))) { cur_match_dist = cur_match_len = 0; } if (d->m_saved_match_len) { if (cur_match_len > d->m_saved_match_len) { tdefl_record_literal(d, (mz_uint8)d->m_saved_lit); if (cur_match_len >= 128) { tdefl_record_match(d, cur_match_len, cur_match_dist); d->m_saved_match_len = 0; len_to_move = cur_match_len; } else { d->m_saved_lit = d->m_dict[cur_pos]; d->m_saved_match_dist = cur_match_dist; d->m_saved_match_len = cur_match_len; } } else { tdefl_record_match(d, d->m_saved_match_len, d->m_saved_match_dist); len_to_move = d->m_saved_match_len - 1; d->m_saved_match_len = 0; } } else if (!cur_match_dist) tdefl_record_literal(d, d->m_dict[MZ_MIN(cur_pos, sizeof(d->m_dict) - 1)]); else if ((d->m_greedy_parsing) || (d->m_flags & TDEFL_RLE_MATCHES) || (cur_match_len >= 128)) { tdefl_record_match(d, cur_match_len, cur_match_dist); len_to_move = cur_match_len; } else { d->m_saved_lit = d->m_dict[MZ_MIN(cur_pos, sizeof(d->m_dict) - 1)]; d->m_saved_match_dist = cur_match_dist; d->m_saved_match_len = cur_match_len; } // Move the lookahead forward by len_to_move bytes. d->m_lookahead_pos += len_to_move; MZ_ASSERT(d->m_lookahead_size >= len_to_move); d->m_lookahead_size -= len_to_move; d->m_dict_size = MZ_MIN(d->m_dict_size + len_to_move, (mz_uint)TDEFL_LZ_DICT_SIZE); // Check if it's time to flush the current LZ codes to the internal output // buffer. if ((d->m_pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) || ((d->m_total_lz_bytes > 31 * 1024) && (((((mz_uint)(d->m_pLZ_code_buf - d->m_lz_code_buf) * 115) >> 7) >= d->m_total_lz_bytes) || (d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS)))) { int n; d->m_pSrc = pSrc; d->m_src_buf_left = src_buf_left; if ((n = tdefl_flush_block(d, 0)) != 0) return (n < 0) ? MZ_FALSE : MZ_TRUE; } } d->m_pSrc = pSrc; d->m_src_buf_left = src_buf_left; return MZ_TRUE; } static tdefl_status tdefl_flush_output_buffer(tdefl_compressor *d) { if (d->m_pIn_buf_size) { *d->m_pIn_buf_size = d->m_pSrc - (const mz_uint8 *)d->m_pIn_buf; } if (d->m_pOut_buf_size) { size_t n = MZ_MIN(*d->m_pOut_buf_size - d->m_out_buf_ofs, d->m_output_flush_remaining); memcpy((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs, d->m_output_buf + d->m_output_flush_ofs, n); d->m_output_flush_ofs += (mz_uint)n; d->m_output_flush_remaining -= (mz_uint)n; d->m_out_buf_ofs += n; *d->m_pOut_buf_size = d->m_out_buf_ofs; } return (d->m_finished && !d->m_output_flush_remaining) ? 
TDEFL_STATUS_DONE : TDEFL_STATUS_OKAY; } tdefl_status tdefl_compress(tdefl_compressor *d, const void *pIn_buf, size_t *pIn_buf_size, void *pOut_buf, size_t *pOut_buf_size, tdefl_flush flush) { if (!d) { if (pIn_buf_size) *pIn_buf_size = 0; if (pOut_buf_size) *pOut_buf_size = 0; return TDEFL_STATUS_BAD_PARAM; } d->m_pIn_buf = pIn_buf; d->m_pIn_buf_size = pIn_buf_size; d->m_pOut_buf = pOut_buf; d->m_pOut_buf_size = pOut_buf_size; d->m_pSrc = (const mz_uint8 *)(pIn_buf); d->m_src_buf_left = pIn_buf_size ? *pIn_buf_size : 0; d->m_out_buf_ofs = 0; d->m_flush = flush; if (((d->m_pPut_buf_func != NULL) == ((pOut_buf != NULL) || (pOut_buf_size != NULL))) || (d->m_prev_return_status != TDEFL_STATUS_OKAY) || (d->m_wants_to_finish && (flush != TDEFL_FINISH)) || (pIn_buf_size && *pIn_buf_size && !pIn_buf) || (pOut_buf_size && *pOut_buf_size && !pOut_buf)) { if (pIn_buf_size) *pIn_buf_size = 0; if (pOut_buf_size) *pOut_buf_size = 0; return (d->m_prev_return_status = TDEFL_STATUS_BAD_PARAM); } d->m_wants_to_finish |= (flush == TDEFL_FINISH); if ((d->m_output_flush_remaining) || (d->m_finished)) return (d->m_prev_return_status = tdefl_flush_output_buffer(d)); #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN if (((d->m_flags & TDEFL_MAX_PROBES_MASK) == 1) && ((d->m_flags & TDEFL_GREEDY_PARSING_FLAG) != 0) && ((d->m_flags & (TDEFL_FILTER_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS | TDEFL_RLE_MATCHES)) == 0)) { if (!tdefl_compress_fast(d)) return d->m_prev_return_status; } else #endif // #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN { if (!tdefl_compress_normal(d)) return d->m_prev_return_status; } if ((d->m_flags & (TDEFL_WRITE_ZLIB_HEADER | TDEFL_COMPUTE_ADLER32)) && (pIn_buf)) d->m_adler32 = (mz_uint32)mz_adler32(d->m_adler32, (const mz_uint8 *)pIn_buf, d->m_pSrc - (const mz_uint8 *)pIn_buf); if ((flush) && (!d->m_lookahead_size) && (!d->m_src_buf_left) && (!d->m_output_flush_remaining)) { if (tdefl_flush_block(d, flush) < 0) return d->m_prev_return_status; d->m_finished = (flush == TDEFL_FINISH); if (flush == TDEFL_FULL_FLUSH) { MZ_CLEAR_OBJ(d->m_hash); MZ_CLEAR_OBJ(d->m_next); d->m_dict_size = 0; } } return (d->m_prev_return_status = tdefl_flush_output_buffer(d)); } tdefl_status tdefl_compress_buffer(tdefl_compressor *d, const void *pIn_buf, size_t in_buf_size, tdefl_flush flush) { MZ_ASSERT(d->m_pPut_buf_func); return tdefl_compress(d, pIn_buf, &in_buf_size, NULL, NULL, flush); } tdefl_status tdefl_init(tdefl_compressor *d, tdefl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags) { d->m_pPut_buf_func = pPut_buf_func; d->m_pPut_buf_user = pPut_buf_user; d->m_flags = (mz_uint)(flags); d->m_max_probes[0] = 1 + ((flags & 0xFFF) + 2) / 3; d->m_greedy_parsing = (flags & TDEFL_GREEDY_PARSING_FLAG) != 0; d->m_max_probes[1] = 1 + (((flags & 0xFFF) >> 2) + 2) / 3; if (!(flags & TDEFL_NONDETERMINISTIC_PARSING_FLAG)) MZ_CLEAR_OBJ(d->m_hash); d->m_lookahead_pos = d->m_lookahead_size = d->m_dict_size = d->m_total_lz_bytes = d->m_lz_code_buf_dict_pos = d->m_bits_in = 0; d->m_output_flush_ofs = d->m_output_flush_remaining = d->m_finished = d->m_block_index = d->m_bit_buffer = d->m_wants_to_finish = 0; d->m_pLZ_code_buf = d->m_lz_code_buf + 1; d->m_pLZ_flags = d->m_lz_code_buf; d->m_num_flags_left = 8; d->m_pOutput_buf = d->m_output_buf; d->m_pOutput_buf_end = d->m_output_buf; d->m_prev_return_status = TDEFL_STATUS_OKAY; d->m_saved_match_dist = d->m_saved_match_len = d->m_saved_lit = 0; d->m_adler32 = 1; d->m_pIn_buf = NULL; d->m_pOut_buf = NULL; d->m_pIn_buf_size = NULL; 
d->m_pOut_buf_size = NULL;
  d->m_flush = TDEFL_NO_FLUSH;
  d->m_pSrc = NULL;
  d->m_src_buf_left = 0;
  d->m_out_buf_ofs = 0;
  memset(&d->m_huff_count[0][0], 0,
         sizeof(d->m_huff_count[0][0]) * TDEFL_MAX_HUFF_SYMBOLS_0);
  memset(&d->m_huff_count[1][0], 0,
         sizeof(d->m_huff_count[1][0]) * TDEFL_MAX_HUFF_SYMBOLS_1);
  return TDEFL_STATUS_OKAY;
}

tdefl_status tdefl_get_prev_return_status(tdefl_compressor *d) {
  return d->m_prev_return_status;
}

mz_uint32 tdefl_get_adler32(tdefl_compressor *d) { return d->m_adler32; }

mz_bool tdefl_compress_mem_to_output(const void *pBuf, size_t buf_len,
                                     tdefl_put_buf_func_ptr pPut_buf_func,
                                     void *pPut_buf_user, int flags) {
  tdefl_compressor *pComp;
  mz_bool succeeded;
  if (((buf_len) && (!pBuf)) || (!pPut_buf_func)) return MZ_FALSE;
  pComp = (tdefl_compressor *)MZ_MALLOC(sizeof(tdefl_compressor));
  if (!pComp) return MZ_FALSE;
  succeeded = (tdefl_init(pComp, pPut_buf_func, pPut_buf_user, flags) ==
               TDEFL_STATUS_OKAY);
  succeeded =
      succeeded && (tdefl_compress_buffer(pComp, pBuf, buf_len, TDEFL_FINISH) ==
                    TDEFL_STATUS_DONE);
  MZ_FREE(pComp);
  return succeeded;
}

typedef struct {
  size_t m_size, m_capacity;
  mz_uint8 *m_pBuf;
  mz_bool m_expandable;
} tdefl_output_buffer;

static mz_bool tdefl_output_buffer_putter(const void *pBuf, int len,
                                          void *pUser) {
  tdefl_output_buffer *p = (tdefl_output_buffer *)pUser;
  size_t new_size = p->m_size + len;
  if (new_size > p->m_capacity) {
    size_t new_capacity = p->m_capacity;
    mz_uint8 *pNew_buf;
    if (!p->m_expandable) return MZ_FALSE;
    do {
      new_capacity = MZ_MAX(128U, new_capacity << 1U);
    } while (new_size > new_capacity);
    pNew_buf = (mz_uint8 *)MZ_REALLOC(p->m_pBuf, new_capacity);
    if (!pNew_buf) return MZ_FALSE;
    p->m_pBuf = pNew_buf;
    p->m_capacity = new_capacity;
  }
  memcpy((mz_uint8 *)p->m_pBuf + p->m_size, pBuf, len);
  p->m_size = new_size;
  return MZ_TRUE;
}

void *tdefl_compress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
                                 size_t *pOut_len, int flags) {
  tdefl_output_buffer out_buf;
  MZ_CLEAR_OBJ(out_buf);
  if (!pOut_len)
    return MZ_FALSE;
  else
    *pOut_len = 0;
  out_buf.m_expandable = MZ_TRUE;
  if (!tdefl_compress_mem_to_output(
          pSrc_buf, src_buf_len, tdefl_output_buffer_putter, &out_buf, flags))
    return NULL;
  *pOut_len = out_buf.m_size;
  return out_buf.m_pBuf;
}

size_t tdefl_compress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
                                 const void *pSrc_buf, size_t src_buf_len,
                                 int flags) {
  tdefl_output_buffer out_buf;
  MZ_CLEAR_OBJ(out_buf);
  if (!pOut_buf) return 0;
  out_buf.m_pBuf = (mz_uint8 *)pOut_buf;
  out_buf.m_capacity = out_buf_len;
  if (!tdefl_compress_mem_to_output(
          pSrc_buf, src_buf_len, tdefl_output_buffer_putter, &out_buf, flags))
    return 0;
  return out_buf.m_size;
}

#ifndef MINIZ_NO_ZLIB_APIS
static const mz_uint s_tdefl_num_probes[11] = {0,   1,   6,   32,  16, 32,
                                               128, 256, 512, 768, 1500};

// level may actually range from [0,10] (10 is a "hidden" max level, where we
// want a bit more compression and it's fine if throughput falls off a cliff
// on some files).
mz_uint tdefl_create_comp_flags_from_zip_params(int level, int window_bits,
                                                int strategy) {
  mz_uint comp_flags =
      s_tdefl_num_probes[(level >= 0) ? MZ_MIN(10, level) : MZ_DEFAULT_LEVEL] |
      ((level <= 3) ?
TDEFL_GREEDY_PARSING_FLAG : 0); if (window_bits > 0) comp_flags |= TDEFL_WRITE_ZLIB_HEADER; if (!level) comp_flags |= TDEFL_FORCE_ALL_RAW_BLOCKS; else if (strategy == MZ_FILTERED) comp_flags |= TDEFL_FILTER_MATCHES; else if (strategy == MZ_HUFFMAN_ONLY) comp_flags &= ~TDEFL_MAX_PROBES_MASK; else if (strategy == MZ_FIXED) comp_flags |= TDEFL_FORCE_ALL_STATIC_BLOCKS; else if (strategy == MZ_RLE) comp_flags |= TDEFL_RLE_MATCHES; return comp_flags; } #endif // MINIZ_NO_ZLIB_APIS #ifdef _MSC_VER #pragma warning(push) #pragma warning(disable : 4204) // nonstandard extension used : non-constant // aggregate initializer (also supported by GNU // C and C99, so no big deal) #pragma warning(disable : 4244) // 'initializing': conversion from '__int64' to // 'int', possible loss of data #pragma warning(disable : 4267) // 'argument': conversion from '__int64' to // 'int', possible loss of data #pragma warning(disable : 4996) // 'strdup': The POSIX name for this item is // deprecated. Instead, use the ISO C and C++ // conformant name: _strdup. #endif // Simple PNG writer function by Alex Evans, 2011. Released into the public // domain: https://gist.github.com/908299, more context at // http://altdevblogaday.org/2011/04/06/a-smaller-jpg-encoder/. // This is actually a modification of Alex's original code so PNG files // generated by this function pass pngcheck. void *tdefl_write_image_to_png_file_in_memory_ex(const void *pImage, int w, int h, int num_chans, size_t *pLen_out, mz_uint level, mz_bool flip) { // Using a local copy of this array here in case MINIZ_NO_ZLIB_APIS was // defined. static const mz_uint s_tdefl_png_num_probes[11] = { 0, 1, 6, 32, 16, 32, 128, 256, 512, 768, 1500}; tdefl_compressor *pComp = (tdefl_compressor *)MZ_MALLOC(sizeof(tdefl_compressor)); tdefl_output_buffer out_buf; int i, bpl = w * num_chans, y, z; mz_uint32 c; *pLen_out = 0; if (!pComp) return NULL; MZ_CLEAR_OBJ(out_buf); out_buf.m_expandable = MZ_TRUE; out_buf.m_capacity = 57 + MZ_MAX(64, (1 + bpl) * h); if (NULL == (out_buf.m_pBuf = (mz_uint8 *)MZ_MALLOC(out_buf.m_capacity))) { MZ_FREE(pComp); return NULL; } // write dummy header for (z = 41; z; --z) tdefl_output_buffer_putter(&z, 1, &out_buf); // compress image data tdefl_init( pComp, tdefl_output_buffer_putter, &out_buf, s_tdefl_png_num_probes[MZ_MIN(10, level)] | TDEFL_WRITE_ZLIB_HEADER); for (y = 0; y < h; ++y) { tdefl_compress_buffer(pComp, &z, 1, TDEFL_NO_FLUSH); tdefl_compress_buffer(pComp, (mz_uint8 *)pImage + (flip ? 
(h - 1 - y) : y) * bpl,
                          bpl, TDEFL_NO_FLUSH);
  }
  if (tdefl_compress_buffer(pComp, NULL, 0, TDEFL_FINISH) !=
      TDEFL_STATUS_DONE) {
    MZ_FREE(pComp);
    MZ_FREE(out_buf.m_pBuf);
    return NULL;
  }
  // write real header
  *pLen_out = out_buf.m_size - 41;
  {
    static const mz_uint8 chans[] = {0x00, 0x00, 0x04, 0x02, 0x06};
    mz_uint8 pnghdr[41] = {0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a,
                           0x00, 0x00, 0x00, 0x0d, 0x49, 0x48, 0x44, 0x52,
                           0,    0,    (mz_uint8)(w >> 8), (mz_uint8)w,
                           0,    0,    (mz_uint8)(h >> 8), (mz_uint8)h,
                           8,    chans[num_chans], 0, 0, 0, 0, 0, 0, 0,
                           (mz_uint8)(*pLen_out >> 24),
                           (mz_uint8)(*pLen_out >> 16),
                           (mz_uint8)(*pLen_out >> 8),
                           (mz_uint8)*pLen_out,
                           0x49, 0x44, 0x41, 0x54};
    c = (mz_uint32)mz_crc32(MZ_CRC32_INIT, pnghdr + 12, 17);
    for (i = 0; i < 4; ++i, c <<= 8)
      ((mz_uint8 *)(pnghdr + 29))[i] = (mz_uint8)(c >> 24);
    memcpy(out_buf.m_pBuf, pnghdr, 41);
  }
  // write footer (IDAT CRC-32, followed by IEND chunk)
  if (!tdefl_output_buffer_putter(
          "\0\0\0\0\0\0\0\0\x49\x45\x4e\x44\xae\x42\x60\x82", 16, &out_buf)) {
    *pLen_out = 0;
    MZ_FREE(pComp);
    MZ_FREE(out_buf.m_pBuf);
    return NULL;
  }
  c = (mz_uint32)mz_crc32(MZ_CRC32_INIT, out_buf.m_pBuf + 41 - 4,
                          *pLen_out + 4);
  for (i = 0; i < 4; ++i, c <<= 8)
    (out_buf.m_pBuf + out_buf.m_size - 16)[i] = (mz_uint8)(c >> 24);
  // compute final size of file, grab compressed data buffer and return
  *pLen_out += 57;
  MZ_FREE(pComp);
  return out_buf.m_pBuf;
}

void *tdefl_write_image_to_png_file_in_memory(const void *pImage, int w, int h,
                                              int num_chans,
                                              size_t *pLen_out) {
  // Level 6 corresponds to TDEFL_DEFAULT_MAX_PROBES or MZ_DEFAULT_LEVEL (but
  // we can't depend on MZ_DEFAULT_LEVEL being available in case the zlib
  // API's were #defined out)
  return tdefl_write_image_to_png_file_in_memory_ex(pImage, w, h, num_chans,
                                                    pLen_out, 6, MZ_FALSE);
}

// ------------------- .ZIP archive reading

#ifndef MINIZ_NO_ARCHIVE_APIS

#ifdef MINIZ_NO_STDIO
#define MZ_FILE void *
#else
#include <stdio.h>
#include <sys/stat.h>

#if defined(_MSC_VER) || defined(__MINGW64__)
static FILE *mz_fopen(const char *pFilename, const char *pMode) {
  FILE *pFile = NULL;
  fopen_s(&pFile, pFilename, pMode);
  return pFile;
}
static FILE *mz_freopen(const char *pPath, const char *pMode, FILE *pStream) {
  FILE *pFile = NULL;
  if (freopen_s(&pFile, pPath, pMode, pStream)) return NULL;
  return pFile;
}
#ifndef MINIZ_NO_TIME
#include <sys/utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN mz_fopen
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 _ftelli64
#define MZ_FSEEK64 _fseeki64
#define MZ_FILE_STAT_STRUCT _stat
#define MZ_FILE_STAT _stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN mz_freopen
#define MZ_DELETE_FILE remove
#elif defined(__MINGW32__)
#ifndef MINIZ_NO_TIME
#include <sys/utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN(f, m) fopen(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftello64
#define MZ_FSEEK64 fseeko64
#define MZ_FILE_STAT_STRUCT _stat
#define MZ_FILE_STAT _stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(f, m, s) freopen(f, m, s)
#define MZ_DELETE_FILE remove
#elif defined(__TINYC__)
#ifndef MINIZ_NO_TIME
#include <sys/utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN(f, m) fopen(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftell
#define MZ_FSEEK64 fseek
#define MZ_FILE_STAT_STRUCT stat
#define MZ_FILE_STAT stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(f, m, s) freopen(f, m, s)
#define MZ_DELETE_FILE remove
#elif \
defined(__GNUC__) && defined(_LARGEFILE64_SOURCE) && _LARGEFILE64_SOURCE #ifndef MINIZ_NO_TIME #include <utime.h> #endif #define MZ_FILE FILE #define MZ_FOPEN(f, m) fopen64(f, m) #define MZ_FCLOSE fclose #define MZ_FREAD fread #define MZ_FWRITE fwrite #define MZ_FTELL64 ftello64 #define MZ_FSEEK64 fseeko64 #define MZ_FILE_STAT_STRUCT stat64 #define MZ_FILE_STAT stat64 #define MZ_FFLUSH fflush #define MZ_FREOPEN(p, m, s) freopen64(p, m, s) #define MZ_DELETE_FILE remove #else #ifndef MINIZ_NO_TIME #include <utime.h> #endif #define MZ_FILE FILE #define MZ_FOPEN(f, m) fopen(f, m) #define MZ_FCLOSE fclose #define MZ_FREAD fread #define MZ_FWRITE fwrite #define MZ_FTELL64 ftello #define MZ_FSEEK64 fseeko #define MZ_FILE_STAT_STRUCT stat #define MZ_FILE_STAT stat #define MZ_FFLUSH fflush #define MZ_FREOPEN(f, m, s) freopen(f, m, s) #define MZ_DELETE_FILE remove #endif // #ifdef _MSC_VER #endif // #ifdef MINIZ_NO_STDIO #define MZ_TOLOWER(c) ((((c) >= 'A') && ((c) <= 'Z')) ? ((c) - 'A' + 'a') : (c)) // Various ZIP archive enums. To completely avoid cross platform compiler // alignment and platform endian issues, miniz.c doesn't use structs for any of // this stuff. enum { // ZIP archive identifiers and record sizes MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG = 0x06054b50, MZ_ZIP_CENTRAL_DIR_HEADER_SIG = 0x02014b50, MZ_ZIP_LOCAL_DIR_HEADER_SIG = 0x04034b50, MZ_ZIP_LOCAL_DIR_HEADER_SIZE = 30, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE = 46, MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE = 22, // Central directory header record offsets MZ_ZIP_CDH_SIG_OFS = 0, MZ_ZIP_CDH_VERSION_MADE_BY_OFS = 4, MZ_ZIP_CDH_VERSION_NEEDED_OFS = 6, MZ_ZIP_CDH_BIT_FLAG_OFS = 8, MZ_ZIP_CDH_METHOD_OFS = 10, MZ_ZIP_CDH_FILE_TIME_OFS = 12, MZ_ZIP_CDH_FILE_DATE_OFS = 14, MZ_ZIP_CDH_CRC32_OFS = 16, MZ_ZIP_CDH_COMPRESSED_SIZE_OFS = 20, MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS = 24, MZ_ZIP_CDH_FILENAME_LEN_OFS = 28, MZ_ZIP_CDH_EXTRA_LEN_OFS = 30, MZ_ZIP_CDH_COMMENT_LEN_OFS = 32, MZ_ZIP_CDH_DISK_START_OFS = 34, MZ_ZIP_CDH_INTERNAL_ATTR_OFS = 36, MZ_ZIP_CDH_EXTERNAL_ATTR_OFS = 38, MZ_ZIP_CDH_LOCAL_HEADER_OFS = 42, // Local directory header offsets MZ_ZIP_LDH_SIG_OFS = 0, MZ_ZIP_LDH_VERSION_NEEDED_OFS = 4, MZ_ZIP_LDH_BIT_FLAG_OFS = 6, MZ_ZIP_LDH_METHOD_OFS = 8, MZ_ZIP_LDH_FILE_TIME_OFS = 10, MZ_ZIP_LDH_FILE_DATE_OFS = 12, MZ_ZIP_LDH_CRC32_OFS = 14, MZ_ZIP_LDH_COMPRESSED_SIZE_OFS = 18, MZ_ZIP_LDH_DECOMPRESSED_SIZE_OFS = 22, MZ_ZIP_LDH_FILENAME_LEN_OFS = 26, MZ_ZIP_LDH_EXTRA_LEN_OFS = 28, // End of central directory offsets MZ_ZIP_ECDH_SIG_OFS = 0, MZ_ZIP_ECDH_NUM_THIS_DISK_OFS = 4, MZ_ZIP_ECDH_NUM_DISK_CDIR_OFS = 6, MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS = 8, MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS = 10, MZ_ZIP_ECDH_CDIR_SIZE_OFS = 12, MZ_ZIP_ECDH_CDIR_OFS_OFS = 16, MZ_ZIP_ECDH_COMMENT_SIZE_OFS = 20, }; typedef struct { void *m_p; size_t m_size, m_capacity; mz_uint m_element_size; } mz_zip_array; struct mz_zip_internal_state_tag { mz_zip_array m_central_dir; mz_zip_array m_central_dir_offsets; mz_zip_array m_sorted_central_dir_offsets; MZ_FILE *m_pFile; void *m_pMem; size_t m_mem_size; size_t m_mem_capacity; }; #define MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(array_ptr, element_size) \ (array_ptr)->m_element_size = element_size #define MZ_ZIP_ARRAY_ELEMENT(array_ptr, element_type, index) \ ((element_type *)((array_ptr)->m_p))[index] static MZ_FORCEINLINE void mz_zip_array_clear(mz_zip_archive *pZip, mz_zip_array *pArray) { pZip->m_pFree(pZip->m_pAlloc_opaque, pArray->m_p); memset(pArray, 0, sizeof(mz_zip_array)); } static mz_bool mz_zip_array_ensure_capacity(mz_zip_archive *pZip, 
mz_zip_array *pArray, size_t min_new_capacity, mz_uint growing) { void *pNew_p; size_t new_capacity = min_new_capacity; MZ_ASSERT(pArray->m_element_size); if (pArray->m_capacity >= min_new_capacity) return MZ_TRUE; if (growing) { new_capacity = MZ_MAX(1, pArray->m_capacity); while (new_capacity < min_new_capacity) new_capacity *= 2; } if (NULL == (pNew_p = pZip->m_pRealloc(pZip->m_pAlloc_opaque, pArray->m_p, pArray->m_element_size, new_capacity))) return MZ_FALSE; pArray->m_p = pNew_p; pArray->m_capacity = new_capacity; return MZ_TRUE; } static MZ_FORCEINLINE mz_bool mz_zip_array_reserve(mz_zip_archive *pZip, mz_zip_array *pArray, size_t new_capacity, mz_uint growing) { if (new_capacity > pArray->m_capacity) { if (!mz_zip_array_ensure_capacity(pZip, pArray, new_capacity, growing)) return MZ_FALSE; } return MZ_TRUE; } static MZ_FORCEINLINE mz_bool mz_zip_array_resize(mz_zip_archive *pZip, mz_zip_array *pArray, size_t new_size, mz_uint growing) { if (new_size > pArray->m_capacity) { if (!mz_zip_array_ensure_capacity(pZip, pArray, new_size, growing)) return MZ_FALSE; } pArray->m_size = new_size; return MZ_TRUE; } static MZ_FORCEINLINE mz_bool mz_zip_array_ensure_room(mz_zip_archive *pZip, mz_zip_array *pArray, size_t n) { return mz_zip_array_reserve(pZip, pArray, pArray->m_size + n, MZ_TRUE); } static MZ_FORCEINLINE mz_bool mz_zip_array_push_back(mz_zip_archive *pZip, mz_zip_array *pArray, const void *pElements, size_t n) { size_t orig_size = pArray->m_size; if (!mz_zip_array_resize(pZip, pArray, orig_size + n, MZ_TRUE)) return MZ_FALSE; memcpy((mz_uint8 *)pArray->m_p + orig_size * pArray->m_element_size, pElements, n * pArray->m_element_size); return MZ_TRUE; } #ifndef MINIZ_NO_TIME static time_t mz_zip_dos_to_time_t(int dos_time, int dos_date) { struct tm tm; memset(&tm, 0, sizeof(tm)); tm.tm_isdst = -1; tm.tm_year = ((dos_date >> 9) & 127) + 1980 - 1900; tm.tm_mon = ((dos_date >> 5) & 15) - 1; tm.tm_mday = dos_date & 31; tm.tm_hour = (dos_time >> 11) & 31; tm.tm_min = (dos_time >> 5) & 63; tm.tm_sec = (dos_time << 1) & 62; return mktime(&tm); } static void mz_zip_time_to_dos_time(time_t time, mz_uint16 *pDOS_time, mz_uint16 *pDOS_date) { #ifdef _MSC_VER struct tm tm_struct; struct tm *tm = &tm_struct; errno_t err = localtime_s(tm, &time); if (err) { *pDOS_date = 0; *pDOS_time = 0; return; } #else struct tm *tm = localtime(&time); #endif *pDOS_time = (mz_uint16)(((tm->tm_hour) << 11) + ((tm->tm_min) << 5) + ((tm->tm_sec) >> 1)); *pDOS_date = (mz_uint16)(((tm->tm_year + 1900 - 1980) << 9) + ((tm->tm_mon + 1) << 5) + tm->tm_mday); } #endif #ifndef MINIZ_NO_STDIO static mz_bool mz_zip_get_file_modified_time(const char *pFilename, mz_uint16 *pDOS_time, mz_uint16 *pDOS_date) { #ifdef MINIZ_NO_TIME (void)pFilename; *pDOS_date = *pDOS_time = 0; #else struct MZ_FILE_STAT_STRUCT file_stat; // On Linux with x86 glibc, this call will fail on large files (>= 0x80000000 // bytes) unless you compiled with _LARGEFILE64_SOURCE. Argh. 
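  // Convert the file's mtime to the 2-second-granularity DOS date/time pair
  // that ZIP headers store (see mz_zip_time_to_dos_time() above).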
if (MZ_FILE_STAT(pFilename, &file_stat) != 0) return MZ_FALSE; mz_zip_time_to_dos_time(file_stat.st_mtime, pDOS_time, pDOS_date); #endif // #ifdef MINIZ_NO_TIME return MZ_TRUE; } #ifndef MINIZ_NO_TIME static mz_bool mz_zip_set_file_times(const char *pFilename, time_t access_time, time_t modified_time) { struct utimbuf t; t.actime = access_time; t.modtime = modified_time; return !utime(pFilename, &t); } #endif // #ifndef MINIZ_NO_TIME #endif // #ifndef MINIZ_NO_STDIO static mz_bool mz_zip_reader_init_internal(mz_zip_archive *pZip, mz_uint32 flags) { (void)flags; if ((!pZip) || (pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_INVALID)) return MZ_FALSE; if (!pZip->m_pAlloc) pZip->m_pAlloc = def_alloc_func; if (!pZip->m_pFree) pZip->m_pFree = def_free_func; if (!pZip->m_pRealloc) pZip->m_pRealloc = def_realloc_func; pZip->m_zip_mode = MZ_ZIP_MODE_READING; pZip->m_archive_size = 0; pZip->m_central_directory_file_ofs = 0; pZip->m_total_files = 0; if (NULL == (pZip->m_pState = (mz_zip_internal_state *)pZip->m_pAlloc( pZip->m_pAlloc_opaque, 1, sizeof(mz_zip_internal_state)))) return MZ_FALSE; memset(pZip->m_pState, 0, sizeof(mz_zip_internal_state)); MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir, sizeof(mz_uint8)); MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir_offsets, sizeof(mz_uint32)); MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_sorted_central_dir_offsets, sizeof(mz_uint32)); return MZ_TRUE; } static MZ_FORCEINLINE mz_bool mz_zip_reader_filename_less(const mz_zip_array *pCentral_dir_array, const mz_zip_array *pCentral_dir_offsets, mz_uint l_index, mz_uint r_index) { const mz_uint8 *pL = &MZ_ZIP_ARRAY_ELEMENT( pCentral_dir_array, mz_uint8, MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32, l_index)), *pE; const mz_uint8 *pR = &MZ_ZIP_ARRAY_ELEMENT( pCentral_dir_array, mz_uint8, MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32, r_index)); mz_uint l_len = MZ_READ_LE16(pL + MZ_ZIP_CDH_FILENAME_LEN_OFS), r_len = MZ_READ_LE16(pR + MZ_ZIP_CDH_FILENAME_LEN_OFS); mz_uint8 l = 0, r = 0; pL += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE; pR += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE; pE = pL + MZ_MIN(l_len, r_len); while (pL < pE) { if ((l = MZ_TOLOWER(*pL)) != (r = MZ_TOLOWER(*pR))) break; pL++; pR++; } return (pL == pE) ? (l_len < r_len) : (l < r); } #define MZ_SWAP_UINT32(a, b) \ do { \ mz_uint32 t = a; \ a = b; \ b = t; \ } \ MZ_MACRO_END // Heap sort of lowercased filenames, used to help accelerate plain central // directory searches by mz_zip_reader_locate_file(). (Could also use qsort(), // but it could allocate memory.) 
static void mz_zip_reader_sort_central_dir_offsets_by_filename( mz_zip_archive *pZip) { mz_zip_internal_state *pState = pZip->m_pState; const mz_zip_array *pCentral_dir_offsets = &pState->m_central_dir_offsets; const mz_zip_array *pCentral_dir = &pState->m_central_dir; mz_uint32 *pIndices = &MZ_ZIP_ARRAY_ELEMENT( &pState->m_sorted_central_dir_offsets, mz_uint32, 0); const int size = pZip->m_total_files; int start = (size - 2) >> 1, end; while (start >= 0) { int child, root = start; for (;;) { if ((child = (root << 1) + 1) >= size) break; child += (((child + 1) < size) && (mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets, pIndices[child], pIndices[child + 1]))); if (!mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets, pIndices[root], pIndices[child])) break; MZ_SWAP_UINT32(pIndices[root], pIndices[child]); root = child; } start--; } end = size - 1; while (end > 0) { int child, root = 0; MZ_SWAP_UINT32(pIndices[end], pIndices[0]); for (;;) { if ((child = (root << 1) + 1) >= end) break; child += (((child + 1) < end) && mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets, pIndices[child], pIndices[child + 1])); if (!mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets, pIndices[root], pIndices[child])) break; MZ_SWAP_UINT32(pIndices[root], pIndices[child]); root = child; } end--; } } static mz_bool mz_zip_reader_read_central_dir(mz_zip_archive *pZip, mz_uint32 flags) { mz_uint cdir_size, num_this_disk, cdir_disk_index; mz_uint64 cdir_ofs; mz_int64 cur_file_ofs; const mz_uint8 *p; mz_uint32 buf_u32[4096 / sizeof(mz_uint32)]; mz_uint8 *pBuf = (mz_uint8 *)buf_u32; mz_bool sort_central_dir = ((flags & MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY) == 0); // Basic sanity checks - reject files which are too small, and check the first // 4 bytes of the file to make sure a local header is there. if (pZip->m_archive_size < MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) return MZ_FALSE; // Find the end of central directory record by scanning the file from the end // towards the beginning. cur_file_ofs = MZ_MAX((mz_int64)pZip->m_archive_size - (mz_int64)sizeof(buf_u32), 0); for (;;) { int i, n = (int)MZ_MIN(sizeof(buf_u32), pZip->m_archive_size - cur_file_ofs); if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf, n) != (mz_uint)n) return MZ_FALSE; for (i = n - 4; i >= 0; --i) if (MZ_READ_LE32(pBuf + i) == MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG) break; if (i >= 0) { cur_file_ofs += i; break; } if ((!cur_file_ofs) || ((pZip->m_archive_size - cur_file_ofs) >= (0xFFFF + MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE))) return MZ_FALSE; cur_file_ofs = MZ_MAX(cur_file_ofs - (sizeof(buf_u32) - 3), 0); } // Read and verify the end of central directory record. 
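/*
Note on the scan above (informational): the end-of-central-directory record
begins with the bytes 50 4B 05 06 ("PK\x05\x06"), which MZ_READ_LE32()
assembles little-endian into 0x06054b50, i.e.
MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG. The record may be followed by an
archive comment of up to 0xFFFF bytes, so the loop walks backwards through
overlapping 4 KB windows (stepping sizeof(buf_u32) - 3 bytes so a signature
straddling a window boundary cannot be missed) until the signature is found
or the maximum comment distance is exceeded.
*/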
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf, MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) != MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) return MZ_FALSE; if ((MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_SIG_OFS) != MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG) || ((pZip->m_total_files = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS)) != MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS))) return MZ_FALSE; num_this_disk = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_NUM_THIS_DISK_OFS); cdir_disk_index = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_NUM_DISK_CDIR_OFS); if (((num_this_disk | cdir_disk_index) != 0) && ((num_this_disk != 1) || (cdir_disk_index != 1))) return MZ_FALSE; if ((cdir_size = MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_CDIR_SIZE_OFS)) < pZip->m_total_files * MZ_ZIP_CENTRAL_DIR_HEADER_SIZE) return MZ_FALSE; cdir_ofs = MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_CDIR_OFS_OFS); if ((cdir_ofs + (mz_uint64)cdir_size) > pZip->m_archive_size) return MZ_FALSE; pZip->m_central_directory_file_ofs = cdir_ofs; if (pZip->m_total_files) { mz_uint i, n; // Read the entire central directory into a heap block, and allocate another // heap block to hold the unsorted central dir file record offsets, and // another to hold the sorted indices. if ((!mz_zip_array_resize(pZip, &pZip->m_pState->m_central_dir, cdir_size, MZ_FALSE)) || (!mz_zip_array_resize(pZip, &pZip->m_pState->m_central_dir_offsets, pZip->m_total_files, MZ_FALSE))) return MZ_FALSE; if (sort_central_dir) { if (!mz_zip_array_resize(pZip, &pZip->m_pState->m_sorted_central_dir_offsets, pZip->m_total_files, MZ_FALSE)) return MZ_FALSE; } if (pZip->m_pRead(pZip->m_pIO_opaque, cdir_ofs, pZip->m_pState->m_central_dir.m_p, cdir_size) != cdir_size) return MZ_FALSE; // Now create an index into the central directory file records, do some // basic sanity checking on each record, and check for zip64 entries (which // are not yet supported). 
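/*
Summary of the per-record checks in the loop below (informational): each
46-byte header must carry the 0x02014b50 signature; a stored (method 0)
entry must have equal compressed and decompressed sizes; a size field of
0xFFFFFFFF is the zip64 escape value and is rejected since zip64 is
unsupported here; and the local header offset plus compressed size must stay
inside m_archive_size.
*/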
p = (const mz_uint8 *)pZip->m_pState->m_central_dir.m_p; for (n = cdir_size, i = 0; i < pZip->m_total_files; ++i) { mz_uint total_header_size, comp_size, decomp_size, disk_index; if ((n < MZ_ZIP_CENTRAL_DIR_HEADER_SIZE) || (MZ_READ_LE32(p) != MZ_ZIP_CENTRAL_DIR_HEADER_SIG)) return MZ_FALSE; MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32, i) = (mz_uint32)(p - (const mz_uint8 *)pZip->m_pState->m_central_dir.m_p); if (sort_central_dir) MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_sorted_central_dir_offsets, mz_uint32, i) = i; comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS); decomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS); if (((!MZ_READ_LE32(p + MZ_ZIP_CDH_METHOD_OFS)) && (decomp_size != comp_size)) || (decomp_size && !comp_size) || (decomp_size == 0xFFFFFFFF) || (comp_size == 0xFFFFFFFF)) return MZ_FALSE; disk_index = MZ_READ_LE16(p + MZ_ZIP_CDH_DISK_START_OFS); if ((disk_index != num_this_disk) && (disk_index != 1)) return MZ_FALSE; if (((mz_uint64)MZ_READ_LE32(p + MZ_ZIP_CDH_LOCAL_HEADER_OFS) + MZ_ZIP_LOCAL_DIR_HEADER_SIZE + comp_size) > pZip->m_archive_size) return MZ_FALSE; if ((total_header_size = MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS) + MZ_READ_LE16(p + MZ_ZIP_CDH_EXTRA_LEN_OFS) + MZ_READ_LE16(p + MZ_ZIP_CDH_COMMENT_LEN_OFS)) > n) return MZ_FALSE; n -= total_header_size; p += total_header_size; } } if (sort_central_dir) mz_zip_reader_sort_central_dir_offsets_by_filename(pZip); return MZ_TRUE; } mz_bool mz_zip_reader_init(mz_zip_archive *pZip, mz_uint64 size, mz_uint32 flags) { if ((!pZip) || (!pZip->m_pRead)) return MZ_FALSE; if (!mz_zip_reader_init_internal(pZip, flags)) return MZ_FALSE; pZip->m_archive_size = size; if (!mz_zip_reader_read_central_dir(pZip, flags)) { mz_zip_reader_end(pZip); return MZ_FALSE; } return MZ_TRUE; } static size_t mz_zip_mem_read_func(void *pOpaque, mz_uint64 file_ofs, void *pBuf, size_t n) { mz_zip_archive *pZip = (mz_zip_archive *)pOpaque; size_t s = (file_ofs >= pZip->m_archive_size) ? 
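/*
Usage sketch for the reader init path above (illustrative, not part of the
library; assumes pZip_data/zip_size describe a complete .zip image in
memory):

  mz_zip_archive zip;
  memset(&zip, 0, sizeof(zip));   // init routines reject a non-zeroed struct
  if (!mz_zip_reader_init_mem(&zip, pZip_data, zip_size, 0))
    return;                       // not a readable archive
  ... mz_zip_reader_xxx() calls ...
  mz_zip_reader_end(&zip);        // frees the central directory copies
*/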
0 : (size_t)MZ_MIN(pZip->m_archive_size - file_ofs, n); memcpy(pBuf, (const mz_uint8 *)pZip->m_pState->m_pMem + file_ofs, s); return s; } mz_bool mz_zip_reader_init_mem(mz_zip_archive *pZip, const void *pMem, size_t size, mz_uint32 flags) { if (!mz_zip_reader_init_internal(pZip, flags)) return MZ_FALSE; pZip->m_archive_size = size; pZip->m_pRead = mz_zip_mem_read_func; pZip->m_pIO_opaque = pZip; #ifdef __cplusplus pZip->m_pState->m_pMem = const_cast<void *>(pMem); #else pZip->m_pState->m_pMem = (void *)pMem; #endif pZip->m_pState->m_mem_size = size; if (!mz_zip_reader_read_central_dir(pZip, flags)) { mz_zip_reader_end(pZip); return MZ_FALSE; } return MZ_TRUE; } #ifndef MINIZ_NO_STDIO static size_t mz_zip_file_read_func(void *pOpaque, mz_uint64 file_ofs, void *pBuf, size_t n) { mz_zip_archive *pZip = (mz_zip_archive *)pOpaque; mz_int64 cur_ofs = MZ_FTELL64(pZip->m_pState->m_pFile); if (((mz_int64)file_ofs < 0) || (((cur_ofs != (mz_int64)file_ofs)) && (MZ_FSEEK64(pZip->m_pState->m_pFile, (mz_int64)file_ofs, SEEK_SET)))) return 0; return MZ_FREAD(pBuf, 1, n, pZip->m_pState->m_pFile); } mz_bool mz_zip_reader_init_file(mz_zip_archive *pZip, const char *pFilename, mz_uint32 flags) { mz_uint64 file_size; MZ_FILE *pFile = MZ_FOPEN(pFilename, "rb"); if (!pFile) return MZ_FALSE; if (MZ_FSEEK64(pFile, 0, SEEK_END)) { MZ_FCLOSE(pFile); return MZ_FALSE; } file_size = MZ_FTELL64(pFile); if (!mz_zip_reader_init_internal(pZip, flags)) { MZ_FCLOSE(pFile); return MZ_FALSE; } pZip->m_pRead = mz_zip_file_read_func; pZip->m_pIO_opaque = pZip; pZip->m_pState->m_pFile = pFile; pZip->m_archive_size = file_size; if (!mz_zip_reader_read_central_dir(pZip, flags)) { mz_zip_reader_end(pZip); return MZ_FALSE; } return MZ_TRUE; } #endif // #ifndef MINIZ_NO_STDIO mz_uint mz_zip_reader_get_num_files(mz_zip_archive *pZip) { return pZip ? pZip->m_total_files : 0; } static MZ_FORCEINLINE const mz_uint8 *mz_zip_reader_get_cdh( mz_zip_archive *pZip, mz_uint file_index) { if ((!pZip) || (!pZip->m_pState) || (file_index >= pZip->m_total_files) || (pZip->m_zip_mode != MZ_ZIP_MODE_READING)) return NULL; return &MZ_ZIP_ARRAY_ELEMENT( &pZip->m_pState->m_central_dir, mz_uint8, MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32, file_index)); } mz_bool mz_zip_reader_is_file_encrypted(mz_zip_archive *pZip, mz_uint file_index) { mz_uint m_bit_flag; const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index); if (!p) return MZ_FALSE; m_bit_flag = MZ_READ_LE16(p + MZ_ZIP_CDH_BIT_FLAG_OFS); return (m_bit_flag & 1); } mz_bool mz_zip_reader_is_file_a_directory(mz_zip_archive *pZip, mz_uint file_index) { mz_uint filename_len, external_attr; const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index); if (!p) return MZ_FALSE; // First see if the filename ends with a '/' character. filename_len = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS); if (filename_len) { if (*(p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + filename_len - 1) == '/') return MZ_TRUE; } // Bugfix: This code was also checking if the internal attribute was non-zero, // which wasn't correct. // Most/all zip writers (hopefully) set DOS file/directory attributes in the // low 16-bits, so check for the DOS directory flag and ignore the source OS // ID in the created by field. // FIXME: Remove this check? Is it necessary - we already check the filename. 
external_attr = MZ_READ_LE32(p + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS); if ((external_attr & 0x10) != 0) return MZ_TRUE; return MZ_FALSE; } mz_bool mz_zip_reader_file_stat(mz_zip_archive *pZip, mz_uint file_index, mz_zip_archive_file_stat *pStat) { mz_uint n; const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index); if ((!p) || (!pStat)) return MZ_FALSE; // Unpack the central directory record. pStat->m_file_index = file_index; pStat->m_central_dir_ofs = MZ_ZIP_ARRAY_ELEMENT( &pZip->m_pState->m_central_dir_offsets, mz_uint32, file_index); pStat->m_version_made_by = MZ_READ_LE16(p + MZ_ZIP_CDH_VERSION_MADE_BY_OFS); pStat->m_version_needed = MZ_READ_LE16(p + MZ_ZIP_CDH_VERSION_NEEDED_OFS); pStat->m_bit_flag = MZ_READ_LE16(p + MZ_ZIP_CDH_BIT_FLAG_OFS); pStat->m_method = MZ_READ_LE16(p + MZ_ZIP_CDH_METHOD_OFS); #ifndef MINIZ_NO_TIME pStat->m_time = mz_zip_dos_to_time_t(MZ_READ_LE16(p + MZ_ZIP_CDH_FILE_TIME_OFS), MZ_READ_LE16(p + MZ_ZIP_CDH_FILE_DATE_OFS)); #endif pStat->m_crc32 = MZ_READ_LE32(p + MZ_ZIP_CDH_CRC32_OFS); pStat->m_comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS); pStat->m_uncomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS); pStat->m_internal_attr = MZ_READ_LE16(p + MZ_ZIP_CDH_INTERNAL_ATTR_OFS); pStat->m_external_attr = MZ_READ_LE32(p + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS); pStat->m_local_header_ofs = MZ_READ_LE32(p + MZ_ZIP_CDH_LOCAL_HEADER_OFS); // Copy as much of the filename and comment as possible. n = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS); n = MZ_MIN(n, MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE - 1); memcpy(pStat->m_filename, p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n); pStat->m_filename[n] = '\0'; n = MZ_READ_LE16(p + MZ_ZIP_CDH_COMMENT_LEN_OFS); n = MZ_MIN(n, MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE - 1); pStat->m_comment_size = n; memcpy(pStat->m_comment, p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS) + MZ_READ_LE16(p + MZ_ZIP_CDH_EXTRA_LEN_OFS), n); pStat->m_comment[n] = '\0'; return MZ_TRUE; } mz_uint mz_zip_reader_get_filename(mz_zip_archive *pZip, mz_uint file_index, char *pFilename, mz_uint filename_buf_size) { mz_uint n; const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index); if (!p) { if (filename_buf_size) pFilename[0] = '\0'; return 0; } n = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS); if (filename_buf_size) { n = MZ_MIN(n, filename_buf_size - 1); memcpy(pFilename, p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n); pFilename[n] = '\0'; } return n + 1; } static MZ_FORCEINLINE mz_bool mz_zip_reader_string_equal(const char *pA, const char *pB, mz_uint len, mz_uint flags) { mz_uint i; if (flags & MZ_ZIP_FLAG_CASE_SENSITIVE) return 0 == memcmp(pA, pB, len); for (i = 0; i < len; ++i) if (MZ_TOLOWER(pA[i]) != MZ_TOLOWER(pB[i])) return MZ_FALSE; return MZ_TRUE; } static MZ_FORCEINLINE int mz_zip_reader_filename_compare( const mz_zip_array *pCentral_dir_array, const mz_zip_array *pCentral_dir_offsets, mz_uint l_index, const char *pR, mz_uint r_len) { const mz_uint8 *pL = &MZ_ZIP_ARRAY_ELEMENT( pCentral_dir_array, mz_uint8, MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32, l_index)), *pE; mz_uint l_len = MZ_READ_LE16(pL + MZ_ZIP_CDH_FILENAME_LEN_OFS); mz_uint8 l = 0, r = 0; pL += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE; pE = pL + MZ_MIN(l_len, r_len); while (pL < pE) { if ((l = MZ_TOLOWER(*pL)) != (r = MZ_TOLOWER(*pR))) break; pL++; pR++; } return (pL == pE) ? 
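/*
Usage sketch (illustrative): enumerating entries through the central
directory copy, using mz_zip_reader_file_stat() defined above.

  mz_uint i, n = mz_zip_reader_get_num_files(&zip);
  for (i = 0; i < n; i++) {
    mz_zip_archive_file_stat st;
    if (!mz_zip_reader_file_stat(&zip, i, &st))
      continue;
    printf("%s: %u -> %u bytes%s\n", st.m_filename,
           (mz_uint)st.m_comp_size, (mz_uint)st.m_uncomp_size,
           mz_zip_reader_is_file_a_directory(&zip, i) ? " (dir)" : "");
  }

Filenames longer than MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE - 1 are truncated by
the stat call; mz_zip_reader_get_filename() returns the full required buffer
size if called first.
*/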
(int)(l_len - r_len) : (l - r); } static int mz_zip_reader_locate_file_binary_search(mz_zip_archive *pZip, const char *pFilename) { mz_zip_internal_state *pState = pZip->m_pState; const mz_zip_array *pCentral_dir_offsets = &pState->m_central_dir_offsets; const mz_zip_array *pCentral_dir = &pState->m_central_dir; mz_uint32 *pIndices = &MZ_ZIP_ARRAY_ELEMENT( &pState->m_sorted_central_dir_offsets, mz_uint32, 0); const int size = pZip->m_total_files; const mz_uint filename_len = (mz_uint)strlen(pFilename); int l = 0, h = size - 1; while (l <= h) { int m = (l + h) >> 1, file_index = pIndices[m], comp = mz_zip_reader_filename_compare(pCentral_dir, pCentral_dir_offsets, file_index, pFilename, filename_len); if (!comp) return file_index; else if (comp < 0) l = m + 1; else h = m - 1; } return -1; } int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName, const char *pComment, mz_uint flags) { mz_uint file_index; size_t name_len, comment_len; if ((!pZip) || (!pZip->m_pState) || (!pName) || (pZip->m_zip_mode != MZ_ZIP_MODE_READING)) return -1; if (((flags & (MZ_ZIP_FLAG_IGNORE_PATH | MZ_ZIP_FLAG_CASE_SENSITIVE)) == 0) && (!pComment) && (pZip->m_pState->m_sorted_central_dir_offsets.m_size)) return mz_zip_reader_locate_file_binary_search(pZip, pName); name_len = strlen(pName); if (name_len > 0xFFFF) return -1; comment_len = pComment ? strlen(pComment) : 0; if (comment_len > 0xFFFF) return -1; for (file_index = 0; file_index < pZip->m_total_files; file_index++) { const mz_uint8 *pHeader = &MZ_ZIP_ARRAY_ELEMENT( &pZip->m_pState->m_central_dir, mz_uint8, MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32, file_index)); mz_uint filename_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_FILENAME_LEN_OFS); const char *pFilename = (const char *)pHeader + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE; if (filename_len < name_len) continue; if (comment_len) { mz_uint file_extra_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_EXTRA_LEN_OFS), file_comment_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_COMMENT_LEN_OFS); const char *pFile_comment = pFilename + filename_len + file_extra_len; if ((file_comment_len != comment_len) || (!mz_zip_reader_string_equal(pComment, pFile_comment, file_comment_len, flags))) continue; } if ((flags & MZ_ZIP_FLAG_IGNORE_PATH) && (filename_len)) { int ofs = filename_len - 1; do { if ((pFilename[ofs] == '/') || (pFilename[ofs] == '\\') || (pFilename[ofs] == ':')) break; } while (--ofs >= 0); ofs++; pFilename += ofs; filename_len -= ofs; } if ((filename_len == name_len) && (mz_zip_reader_string_equal(pName, pFilename, filename_len, flags))) return file_index; } return -1; } mz_bool mz_zip_reader_extract_to_mem_no_alloc(mz_zip_archive *pZip, mz_uint file_index, void *pBuf, size_t buf_size, mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size) { int status = TINFL_STATUS_DONE; mz_uint64 needed_size, cur_file_ofs, comp_remaining, out_buf_ofs = 0, read_buf_size, read_buf_ofs = 0, read_buf_avail; mz_zip_archive_file_stat file_stat; void *pRead_buf; mz_uint32 local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) / sizeof(mz_uint32)]; mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32; tinfl_decompressor inflator; if ((buf_size) && (!pBuf)) return MZ_FALSE; if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat)) return MZ_FALSE; // Empty file, or a directory (but not always a directory - I've seen odd zips // with directories that have compressed data which inflates to 0 bytes) if (!file_stat.m_comp_size) return MZ_TRUE; // Entry is a subdirectory (I've seen old 
zips with dir entries which have // compressed deflate data which inflates to 0 bytes, but these entries claim // to uncompress to 512 bytes in the headers). // I'm torn how to handle this case - should it fail instead? if (mz_zip_reader_is_file_a_directory(pZip, file_index)) return MZ_TRUE; // Encryption and patch files are not supported. if (file_stat.m_bit_flag & (1 | 32)) return MZ_FALSE; // This function only supports stored and deflate. if ((!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (file_stat.m_method != 0) && (file_stat.m_method != MZ_DEFLATED)) return MZ_FALSE; // Ensure supplied output buffer is large enough. needed_size = (flags & MZ_ZIP_FLAG_COMPRESSED_DATA) ? file_stat.m_comp_size : file_stat.m_uncomp_size; if (buf_size < needed_size) return MZ_FALSE; // Read and parse the local directory entry. cur_file_ofs = file_stat.m_local_header_ofs; if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pLocal_header, MZ_ZIP_LOCAL_DIR_HEADER_SIZE) != MZ_ZIP_LOCAL_DIR_HEADER_SIZE) return MZ_FALSE; if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG) return MZ_FALSE; cur_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) + MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS); if ((cur_file_ofs + file_stat.m_comp_size) > pZip->m_archive_size) return MZ_FALSE; if ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) || (!file_stat.m_method)) { // The file is stored or the caller has requested the compressed data. if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf, (size_t)needed_size) != needed_size) return MZ_FALSE; return ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) != 0) || (mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf, (size_t)file_stat.m_uncomp_size) == file_stat.m_crc32); } // Decompress the file either directly from memory or from a file input // buffer. tinfl_init(&inflator); if (pZip->m_pState->m_pMem) { // Read directly from the archive in memory. pRead_buf = (mz_uint8 *)pZip->m_pState->m_pMem + cur_file_ofs; read_buf_size = read_buf_avail = file_stat.m_comp_size; comp_remaining = 0; } else if (pUser_read_buf) { // Use a user provided read buffer. if (!user_read_buf_size) return MZ_FALSE; pRead_buf = (mz_uint8 *)pUser_read_buf; read_buf_size = user_read_buf_size; read_buf_avail = 0; comp_remaining = file_stat.m_comp_size; } else { // Temporarily allocate a read buffer. read_buf_size = MZ_MIN(file_stat.m_comp_size, (mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE); #ifdef _MSC_VER if (((0, sizeof(size_t) == sizeof(mz_uint32))) && (read_buf_size > 0x7FFFFFFF)) #else if (((sizeof(size_t) == sizeof(mz_uint32))) && (read_buf_size > 0x7FFFFFFF)) #endif return MZ_FALSE; if (NULL == (pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, (size_t)read_buf_size))) return MZ_FALSE; read_buf_avail = 0; comp_remaining = file_stat.m_comp_size; } do { size_t in_buf_size, out_buf_size = (size_t)(file_stat.m_uncomp_size - out_buf_ofs); if ((!read_buf_avail) && (!pZip->m_pState->m_pMem)) { read_buf_avail = MZ_MIN(read_buf_size, comp_remaining); if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf, (size_t)read_buf_avail) != read_buf_avail) { status = TINFL_STATUS_FAILED; break; } cur_file_ofs += read_buf_avail; comp_remaining -= read_buf_avail; read_buf_ofs = 0; } in_buf_size = (size_t)read_buf_avail; status = tinfl_decompress( &inflator, (mz_uint8 *)pRead_buf + read_buf_ofs, &in_buf_size, (mz_uint8 *)pBuf, (mz_uint8 *)pBuf + out_buf_ofs, &out_buf_size, TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF | (comp_remaining ? 
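/*
Why this flag pair (informational): TINFL_FLAG_HAS_MORE_INPUT is passed
while compressed bytes remain so the inflater reports
TINFL_STATUS_NEEDS_MORE_INPUT at the end of the current chunk instead of
treating it as a truncated stream; on the last chunk (comp_remaining == 0)
the flag is dropped so genuine truncation fails.
TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF applies because pBuf holds the
entire uncompressed file, so LZ matches may reach arbitrarily far back
without a 32 KB dictionary ring buffer.
*/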
TINFL_FLAG_HAS_MORE_INPUT : 0)); read_buf_avail -= in_buf_size; read_buf_ofs += in_buf_size; out_buf_ofs += out_buf_size; } while (status == TINFL_STATUS_NEEDS_MORE_INPUT); if (status == TINFL_STATUS_DONE) { // Make sure the entire file was decompressed, and check its CRC. if ((out_buf_ofs != file_stat.m_uncomp_size) || (mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf, (size_t)file_stat.m_uncomp_size) != file_stat.m_crc32)) status = TINFL_STATUS_FAILED; } if ((!pZip->m_pState->m_pMem) && (!pUser_read_buf)) pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); return status == TINFL_STATUS_DONE; } mz_bool mz_zip_reader_extract_file_to_mem_no_alloc( mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size, mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size) { int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags); if (file_index < 0) return MZ_FALSE; return mz_zip_reader_extract_to_mem_no_alloc(pZip, file_index, pBuf, buf_size, flags, pUser_read_buf, user_read_buf_size); } mz_bool mz_zip_reader_extract_to_mem(mz_zip_archive *pZip, mz_uint file_index, void *pBuf, size_t buf_size, mz_uint flags) { return mz_zip_reader_extract_to_mem_no_alloc(pZip, file_index, pBuf, buf_size, flags, NULL, 0); } mz_bool mz_zip_reader_extract_file_to_mem(mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size, mz_uint flags) { return mz_zip_reader_extract_file_to_mem_no_alloc(pZip, pFilename, pBuf, buf_size, flags, NULL, 0); } void *mz_zip_reader_extract_to_heap(mz_zip_archive *pZip, mz_uint file_index, size_t *pSize, mz_uint flags) { mz_uint64 comp_size, uncomp_size, alloc_size; const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index); void *pBuf; if (pSize) *pSize = 0; if (!p) return NULL; comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS); uncomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS); alloc_size = (flags & MZ_ZIP_FLAG_COMPRESSED_DATA) ? 
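/*
Usage sketch (illustrative) for the heap extractor being defined here:

  int i = mz_zip_reader_locate_file(&zip, "dir/file.txt", NULL, 0);
  if (i >= 0) {
    size_t size = 0;
    void *p = mz_zip_reader_extract_to_heap(&zip, (mz_uint)i, &size, 0);
    if (p) {
      ... size bytes of decompressed, CRC-checked data ...
      mz_free(p);   // pairs with the default allocator; use m_pFree if
                    // custom allocation callbacks were installed
    }
  }
*/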
comp_size : uncomp_size; #ifdef _MSC_VER if (((0, sizeof(size_t) == sizeof(mz_uint32))) && (alloc_size > 0x7FFFFFFF)) #else if (((sizeof(size_t) == sizeof(mz_uint32))) && (alloc_size > 0x7FFFFFFF)) #endif return NULL; if (NULL == (pBuf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, (size_t)alloc_size))) return NULL; if (!mz_zip_reader_extract_to_mem(pZip, file_index, pBuf, (size_t)alloc_size, flags)) { pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf); return NULL; } if (pSize) *pSize = (size_t)alloc_size; return pBuf; } void *mz_zip_reader_extract_file_to_heap(mz_zip_archive *pZip, const char *pFilename, size_t *pSize, mz_uint flags) { int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags); if (file_index < 0) { if (pSize) *pSize = 0; return MZ_FALSE; } return mz_zip_reader_extract_to_heap(pZip, file_index, pSize, flags); } mz_bool mz_zip_reader_extract_to_callback(mz_zip_archive *pZip, mz_uint file_index, mz_file_write_func pCallback, void *pOpaque, mz_uint flags) { int status = TINFL_STATUS_DONE; mz_uint file_crc32 = MZ_CRC32_INIT; mz_uint64 read_buf_size, read_buf_ofs = 0, read_buf_avail, comp_remaining, out_buf_ofs = 0, cur_file_ofs; mz_zip_archive_file_stat file_stat; void *pRead_buf = NULL; void *pWrite_buf = NULL; mz_uint32 local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) / sizeof(mz_uint32)]; mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32; if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat)) return MZ_FALSE; // Empty file, or a directory (but not always a directory - I've seen odd zips // with directories that have compressed data which inflates to 0 bytes) if (!file_stat.m_comp_size) return MZ_TRUE; // Entry is a subdirectory (I've seen old zips with dir entries which have // compressed deflate data which inflates to 0 bytes, but these entries claim // to uncompress to 512 bytes in the headers). // I'm torn how to handle this case - should it fail instead? if (mz_zip_reader_is_file_a_directory(pZip, file_index)) return MZ_TRUE; // Encryption and patch files are not supported. if (file_stat.m_bit_flag & (1 | 32)) return MZ_FALSE; // This function only supports stored and deflate. if ((!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (file_stat.m_method != 0) && (file_stat.m_method != MZ_DEFLATED)) return MZ_FALSE; // Read and parse the local directory entry. cur_file_ofs = file_stat.m_local_header_ofs; if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pLocal_header, MZ_ZIP_LOCAL_DIR_HEADER_SIZE) != MZ_ZIP_LOCAL_DIR_HEADER_SIZE) return MZ_FALSE; if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG) return MZ_FALSE; cur_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) + MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS); if ((cur_file_ofs + file_stat.m_comp_size) > pZip->m_archive_size) return MZ_FALSE; // Decompress the file either directly from memory or from a file input // buffer. if (pZip->m_pState->m_pMem) { pRead_buf = (mz_uint8 *)pZip->m_pState->m_pMem + cur_file_ofs; read_buf_size = read_buf_avail = file_stat.m_comp_size; comp_remaining = 0; } else { read_buf_size = MZ_MIN(file_stat.m_comp_size, (mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE); if (NULL == (pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, (size_t)read_buf_size))) return MZ_FALSE; read_buf_avail = 0; comp_remaining = file_stat.m_comp_size; } if ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) || (!file_stat.m_method)) { // The file is stored or the caller has requested the compressed data. 
if (pZip->m_pState->m_pMem) { #ifdef _MSC_VER if (((0, sizeof(size_t) == sizeof(mz_uint32))) && (file_stat.m_comp_size > 0xFFFFFFFF)) #else if (((sizeof(size_t) == sizeof(mz_uint32))) && (file_stat.m_comp_size > 0xFFFFFFFF)) #endif return MZ_FALSE; if (pCallback(pOpaque, out_buf_ofs, pRead_buf, (size_t)file_stat.m_comp_size) != file_stat.m_comp_size) status = TINFL_STATUS_FAILED; else if (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) file_crc32 = (mz_uint32)mz_crc32(file_crc32, (const mz_uint8 *)pRead_buf, (size_t)file_stat.m_comp_size); cur_file_ofs += file_stat.m_comp_size; out_buf_ofs += file_stat.m_comp_size; comp_remaining = 0; } else { while (comp_remaining) { read_buf_avail = MZ_MIN(read_buf_size, comp_remaining); if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf, (size_t)read_buf_avail) != read_buf_avail) { status = TINFL_STATUS_FAILED; break; } if (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) file_crc32 = (mz_uint32)mz_crc32( file_crc32, (const mz_uint8 *)pRead_buf, (size_t)read_buf_avail); if (pCallback(pOpaque, out_buf_ofs, pRead_buf, (size_t)read_buf_avail) != read_buf_avail) { status = TINFL_STATUS_FAILED; break; } cur_file_ofs += read_buf_avail; out_buf_ofs += read_buf_avail; comp_remaining -= read_buf_avail; } } } else { tinfl_decompressor inflator; tinfl_init(&inflator); if (NULL == (pWrite_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, TINFL_LZ_DICT_SIZE))) status = TINFL_STATUS_FAILED; else { do { mz_uint8 *pWrite_buf_cur = (mz_uint8 *)pWrite_buf + (out_buf_ofs & (TINFL_LZ_DICT_SIZE - 1)); size_t in_buf_size, out_buf_size = TINFL_LZ_DICT_SIZE - (out_buf_ofs & (TINFL_LZ_DICT_SIZE - 1)); if ((!read_buf_avail) && (!pZip->m_pState->m_pMem)) { read_buf_avail = MZ_MIN(read_buf_size, comp_remaining); if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf, (size_t)read_buf_avail) != read_buf_avail) { status = TINFL_STATUS_FAILED; break; } cur_file_ofs += read_buf_avail; comp_remaining -= read_buf_avail; read_buf_ofs = 0; } in_buf_size = (size_t)read_buf_avail; status = tinfl_decompress( &inflator, (const mz_uint8 *)pRead_buf + read_buf_ofs, &in_buf_size, (mz_uint8 *)pWrite_buf, pWrite_buf_cur, &out_buf_size, comp_remaining ? TINFL_FLAG_HAS_MORE_INPUT : 0); read_buf_avail -= in_buf_size; read_buf_ofs += in_buf_size; if (out_buf_size) { if (pCallback(pOpaque, out_buf_ofs, pWrite_buf_cur, out_buf_size) != out_buf_size) { status = TINFL_STATUS_FAILED; break; } file_crc32 = (mz_uint32)mz_crc32(file_crc32, pWrite_buf_cur, out_buf_size); if ((out_buf_ofs += out_buf_size) > file_stat.m_uncomp_size) { status = TINFL_STATUS_FAILED; break; } } } while ((status == TINFL_STATUS_NEEDS_MORE_INPUT) || (status == TINFL_STATUS_HAS_MORE_OUTPUT)); } } if ((status == TINFL_STATUS_DONE) && (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA))) { // Make sure the entire file was decompressed, and check its CRC. 
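/*
Usage sketch (illustrative) for the callback extractor above; my_sink and
its FILE* opaque are hypothetical names, not part of miniz:

  static size_t my_sink(void *pOpaque, mz_uint64 ofs, const void *pBuf,
                        size_t n) {
    (void)ofs;   // this implementation emits output strictly in order
    return fwrite(pBuf, 1, n, (FILE *)pOpaque);
  }
  ...
  mz_zip_reader_extract_to_callback(&zip, file_index, my_sink, pOut_file, 0);

This streams through a TINFL_LZ_DICT_SIZE staging buffer, so the whole
uncompressed file is never resident in memory at once.
*/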
if ((out_buf_ofs != file_stat.m_uncomp_size) || (file_crc32 != file_stat.m_crc32)) status = TINFL_STATUS_FAILED; } if (!pZip->m_pState->m_pMem) pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); if (pWrite_buf) pZip->m_pFree(pZip->m_pAlloc_opaque, pWrite_buf); return status == TINFL_STATUS_DONE; } mz_bool mz_zip_reader_extract_file_to_callback(mz_zip_archive *pZip, const char *pFilename, mz_file_write_func pCallback, void *pOpaque, mz_uint flags) { int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags); if (file_index < 0) return MZ_FALSE; return mz_zip_reader_extract_to_callback(pZip, file_index, pCallback, pOpaque, flags); } #ifndef MINIZ_NO_STDIO static size_t mz_zip_file_write_callback(void *pOpaque, mz_uint64 ofs, const void *pBuf, size_t n) { (void)ofs; return MZ_FWRITE(pBuf, 1, n, (MZ_FILE *)pOpaque); } mz_bool mz_zip_reader_extract_to_file(mz_zip_archive *pZip, mz_uint file_index, const char *pDst_filename, mz_uint flags) { mz_bool status; mz_zip_archive_file_stat file_stat; MZ_FILE *pFile; if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat)) return MZ_FALSE; pFile = MZ_FOPEN(pDst_filename, "wb"); if (!pFile) return MZ_FALSE; status = mz_zip_reader_extract_to_callback( pZip, file_index, mz_zip_file_write_callback, pFile, flags); if (MZ_FCLOSE(pFile) == EOF) return MZ_FALSE; #ifndef MINIZ_NO_TIME if (status) mz_zip_set_file_times(pDst_filename, file_stat.m_time, file_stat.m_time); #endif return status; } #endif // #ifndef MINIZ_NO_STDIO mz_bool mz_zip_reader_end(mz_zip_archive *pZip) { if ((!pZip) || (!pZip->m_pState) || (!pZip->m_pAlloc) || (!pZip->m_pFree) || (pZip->m_zip_mode != MZ_ZIP_MODE_READING)) return MZ_FALSE; if (pZip->m_pState) { mz_zip_internal_state *pState = pZip->m_pState; pZip->m_pState = NULL; mz_zip_array_clear(pZip, &pState->m_central_dir); mz_zip_array_clear(pZip, &pState->m_central_dir_offsets); mz_zip_array_clear(pZip, &pState->m_sorted_central_dir_offsets); #ifndef MINIZ_NO_STDIO if (pState->m_pFile) { MZ_FCLOSE(pState->m_pFile); pState->m_pFile = NULL; } #endif // #ifndef MINIZ_NO_STDIO pZip->m_pFree(pZip->m_pAlloc_opaque, pState); } pZip->m_zip_mode = MZ_ZIP_MODE_INVALID; return MZ_TRUE; } #ifndef MINIZ_NO_STDIO mz_bool mz_zip_reader_extract_file_to_file(mz_zip_archive *pZip, const char *pArchive_filename, const char *pDst_filename, mz_uint flags) { int file_index = mz_zip_reader_locate_file(pZip, pArchive_filename, NULL, flags); if (file_index < 0) return MZ_FALSE; return mz_zip_reader_extract_to_file(pZip, file_index, pDst_filename, flags); } #endif // ------------------- .ZIP archive writing #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS static void mz_write_le16(mz_uint8 *p, mz_uint16 v) { p[0] = (mz_uint8)v; p[1] = (mz_uint8)(v >> 8); } static void mz_write_le32(mz_uint8 *p, mz_uint32 v) { p[0] = (mz_uint8)v; p[1] = (mz_uint8)(v >> 8); p[2] = (mz_uint8)(v >> 16); p[3] = (mz_uint8)(v >> 24); } #define MZ_WRITE_LE16(p, v) mz_write_le16((mz_uint8 *)(p), (mz_uint16)(v)) #define MZ_WRITE_LE32(p, v) mz_write_le32((mz_uint8 *)(p), (mz_uint32)(v)) mz_bool mz_zip_writer_init(mz_zip_archive *pZip, mz_uint64 existing_size) { if ((!pZip) || (pZip->m_pState) || (!pZip->m_pWrite) || (pZip->m_zip_mode != MZ_ZIP_MODE_INVALID)) return MZ_FALSE; if (pZip->m_file_offset_alignment) { // Ensure user specified file offset alignment is a power of 2. 
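/*
The check below relies on the standard bit trick: x is a power of two
exactly when x & (x - 1) == 0 (for nonzero x). E.g. 64 & 63 == 0 passes,
while 48 & 47 == 32 fails, so m_file_offset_alignment = 48 would be
rejected.
*/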
if (pZip->m_file_offset_alignment & (pZip->m_file_offset_alignment - 1)) return MZ_FALSE; } if (!pZip->m_pAlloc) pZip->m_pAlloc = def_alloc_func; if (!pZip->m_pFree) pZip->m_pFree = def_free_func; if (!pZip->m_pRealloc) pZip->m_pRealloc = def_realloc_func; pZip->m_zip_mode = MZ_ZIP_MODE_WRITING; pZip->m_archive_size = existing_size; pZip->m_central_directory_file_ofs = 0; pZip->m_total_files = 0; if (NULL == (pZip->m_pState = (mz_zip_internal_state *)pZip->m_pAlloc( pZip->m_pAlloc_opaque, 1, sizeof(mz_zip_internal_state)))) return MZ_FALSE; memset(pZip->m_pState, 0, sizeof(mz_zip_internal_state)); MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir, sizeof(mz_uint8)); MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir_offsets, sizeof(mz_uint32)); MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_sorted_central_dir_offsets, sizeof(mz_uint32)); return MZ_TRUE; } static size_t mz_zip_heap_write_func(void *pOpaque, mz_uint64 file_ofs, const void *pBuf, size_t n) { mz_zip_archive *pZip = (mz_zip_archive *)pOpaque; mz_zip_internal_state *pState = pZip->m_pState; mz_uint64 new_size = MZ_MAX(file_ofs + n, pState->m_mem_size); #ifdef _MSC_VER if ((!n) || ((0, sizeof(size_t) == sizeof(mz_uint32)) && (new_size > 0x7FFFFFFF))) #else if ((!n) || ((sizeof(size_t) == sizeof(mz_uint32)) && (new_size > 0x7FFFFFFF))) #endif return 0; if (new_size > pState->m_mem_capacity) { void *pNew_block; size_t new_capacity = MZ_MAX(64, pState->m_mem_capacity); while (new_capacity < new_size) new_capacity *= 2; if (NULL == (pNew_block = pZip->m_pRealloc( pZip->m_pAlloc_opaque, pState->m_pMem, 1, new_capacity))) return 0; pState->m_pMem = pNew_block; pState->m_mem_capacity = new_capacity; } memcpy((mz_uint8 *)pState->m_pMem + file_ofs, pBuf, n); pState->m_mem_size = (size_t)new_size; return n; } mz_bool mz_zip_writer_init_heap(mz_zip_archive *pZip, size_t size_to_reserve_at_beginning, size_t initial_allocation_size) { pZip->m_pWrite = mz_zip_heap_write_func; pZip->m_pIO_opaque = pZip; if (!mz_zip_writer_init(pZip, size_to_reserve_at_beginning)) return MZ_FALSE; if (0 != (initial_allocation_size = MZ_MAX(initial_allocation_size, size_to_reserve_at_beginning))) { if (NULL == (pZip->m_pState->m_pMem = pZip->m_pAlloc( pZip->m_pAlloc_opaque, 1, initial_allocation_size))) { mz_zip_writer_end(pZip); return MZ_FALSE; } pZip->m_pState->m_mem_capacity = initial_allocation_size; } return MZ_TRUE; } #ifndef MINIZ_NO_STDIO static size_t mz_zip_file_write_func(void *pOpaque, mz_uint64 file_ofs, const void *pBuf, size_t n) { mz_zip_archive *pZip = (mz_zip_archive *)pOpaque; mz_int64 cur_ofs = MZ_FTELL64(pZip->m_pState->m_pFile); if (((mz_int64)file_ofs < 0) || (((cur_ofs != (mz_int64)file_ofs)) && (MZ_FSEEK64(pZip->m_pState->m_pFile, (mz_int64)file_ofs, SEEK_SET)))) return 0; return MZ_FWRITE(pBuf, 1, n, pZip->m_pState->m_pFile); } mz_bool mz_zip_writer_init_file(mz_zip_archive *pZip, const char *pFilename, mz_uint64 size_to_reserve_at_beginning) { MZ_FILE *pFile; pZip->m_pWrite = mz_zip_file_write_func; pZip->m_pIO_opaque = pZip; if (!mz_zip_writer_init(pZip, size_to_reserve_at_beginning)) return MZ_FALSE; if (NULL == (pFile = MZ_FOPEN(pFilename, "wb"))) { mz_zip_writer_end(pZip); return MZ_FALSE; } pZip->m_pState->m_pFile = pFile; if (size_to_reserve_at_beginning) { mz_uint64 cur_ofs = 0; char buf[4096]; MZ_CLEAR_OBJ(buf); do { size_t n = (size_t)MZ_MIN(sizeof(buf), size_to_reserve_at_beginning); if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_ofs, buf, n) != n) { mz_zip_writer_end(pZip); return MZ_FALSE; } cur_ofs += n; 
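/*
Usage sketch (illustrative) for the heap-backed writer above.
mz_zip_writer_finalize_heap_archive() is the matching finalizer declared
with the other writer APIs; it transfers ownership of the heap block to the
caller.

  mz_zip_archive zip;
  void *pBuf = NULL; size_t size = 0;
  memset(&zip, 0, sizeof(zip));
  mz_zip_writer_init_heap(&zip, 0, 64 * 1024);
  mz_zip_writer_add_mem(&zip, "hello.txt", "hi", 2, MZ_BEST_COMPRESSION);
  mz_zip_writer_finalize_heap_archive(&zip, &pBuf, &size);
  mz_zip_writer_end(&zip);
  ... size bytes of .zip data at pBuf ...
  mz_free(pBuf);
*/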
size_to_reserve_at_beginning -= n; } while (size_to_reserve_at_beginning); } return MZ_TRUE; } #endif // #ifndef MINIZ_NO_STDIO mz_bool mz_zip_writer_init_from_reader(mz_zip_archive *pZip, const char *pFilename) { mz_zip_internal_state *pState; if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_READING)) return MZ_FALSE; // No sense in trying to write to an archive that's already at the support max // size if ((pZip->m_total_files == 0xFFFF) || ((pZip->m_archive_size + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + MZ_ZIP_LOCAL_DIR_HEADER_SIZE) > 0xFFFFFFFF)) return MZ_FALSE; pState = pZip->m_pState; if (pState->m_pFile) { #ifdef MINIZ_NO_STDIO pFilename; return MZ_FALSE; #else // Archive is being read from stdio - try to reopen as writable. if (pZip->m_pIO_opaque != pZip) return MZ_FALSE; if (!pFilename) return MZ_FALSE; pZip->m_pWrite = mz_zip_file_write_func; if (NULL == (pState->m_pFile = MZ_FREOPEN(pFilename, "r+b", pState->m_pFile))) { // The mz_zip_archive is now in a bogus state because pState->m_pFile is // NULL, so just close it. mz_zip_reader_end(pZip); return MZ_FALSE; } #endif // #ifdef MINIZ_NO_STDIO } else if (pState->m_pMem) { // Archive lives in a memory block. Assume it's from the heap that we can // resize using the realloc callback. if (pZip->m_pIO_opaque != pZip) return MZ_FALSE; pState->m_mem_capacity = pState->m_mem_size; pZip->m_pWrite = mz_zip_heap_write_func; } // Archive is being read via a user provided read function - make sure the // user has specified a write function too. else if (!pZip->m_pWrite) return MZ_FALSE; // Start writing new files at the archive's current central directory // location. pZip->m_archive_size = pZip->m_central_directory_file_ofs; pZip->m_zip_mode = MZ_ZIP_MODE_WRITING; pZip->m_central_directory_file_ofs = 0; return MZ_TRUE; } mz_bool mz_zip_writer_add_mem(mz_zip_archive *pZip, const char *pArchive_name, const void *pBuf, size_t buf_size, mz_uint level_and_flags) { return mz_zip_writer_add_mem_ex(pZip, pArchive_name, pBuf, buf_size, NULL, 0, level_and_flags, 0, 0); } typedef struct { mz_zip_archive *m_pZip; mz_uint64 m_cur_archive_file_ofs; mz_uint64 m_comp_size; } mz_zip_writer_add_state; static mz_bool mz_zip_writer_add_put_buf_callback(const void *pBuf, int len, void *pUser) { mz_zip_writer_add_state *pState = (mz_zip_writer_add_state *)pUser; if ((int)pState->m_pZip->m_pWrite(pState->m_pZip->m_pIO_opaque, pState->m_cur_archive_file_ofs, pBuf, len) != len) return MZ_FALSE; pState->m_cur_archive_file_ofs += len; pState->m_comp_size += len; return MZ_TRUE; } static mz_bool mz_zip_writer_create_local_dir_header( mz_zip_archive *pZip, mz_uint8 *pDst, mz_uint16 filename_size, mz_uint16 extra_size, mz_uint64 uncomp_size, mz_uint64 comp_size, mz_uint32 uncomp_crc32, mz_uint16 method, mz_uint16 bit_flags, mz_uint16 dos_time, mz_uint16 dos_date) { (void)pZip; memset(pDst, 0, MZ_ZIP_LOCAL_DIR_HEADER_SIZE); MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_SIG_OFS, MZ_ZIP_LOCAL_DIR_HEADER_SIG); MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_VERSION_NEEDED_OFS, method ? 
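/*
(The expression split here writes version-needed-to-extract: 20 encodes
PKWARE version 2.0, required for deflate; stored entries advertise 0.)

Usage sketch (illustrative) for mz_zip_writer_init_from_reader() above,
appending to an archive opened for reading:

  mz_zip_archive zip;
  memset(&zip, 0, sizeof(zip));
  mz_zip_reader_init_file(&zip, "existing.zip", 0);
  if (mz_zip_writer_init_from_reader(&zip, "existing.zip")) {
    mz_zip_writer_add_mem(&zip, "added.txt", "new data", 8, MZ_DEFAULT_LEVEL);
    mz_zip_writer_finalize_archive(&zip);  // rewrites the central directory
  }
  mz_zip_writer_end(&zip);

New data is written over the old central directory, so the update happens in
place and is not crash-safe.
*/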
20 : 0); MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_BIT_FLAG_OFS, bit_flags); MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_METHOD_OFS, method); MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILE_TIME_OFS, dos_time); MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILE_DATE_OFS, dos_date); MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_CRC32_OFS, uncomp_crc32); MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_COMPRESSED_SIZE_OFS, comp_size); MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_DECOMPRESSED_SIZE_OFS, uncomp_size); MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILENAME_LEN_OFS, filename_size); MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_EXTRA_LEN_OFS, extra_size); return MZ_TRUE; } static mz_bool mz_zip_writer_create_central_dir_header( mz_zip_archive *pZip, mz_uint8 *pDst, mz_uint16 filename_size, mz_uint16 extra_size, mz_uint16 comment_size, mz_uint64 uncomp_size, mz_uint64 comp_size, mz_uint32 uncomp_crc32, mz_uint16 method, mz_uint16 bit_flags, mz_uint16 dos_time, mz_uint16 dos_date, mz_uint64 local_header_ofs, mz_uint32 ext_attributes) { (void)pZip; memset(pDst, 0, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE); MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_SIG_OFS, MZ_ZIP_CENTRAL_DIR_HEADER_SIG); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_VERSION_NEEDED_OFS, method ? 20 : 0); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_BIT_FLAG_OFS, bit_flags); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_METHOD_OFS, method); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILE_TIME_OFS, dos_time); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILE_DATE_OFS, dos_date); MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_CRC32_OFS, uncomp_crc32); MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS, comp_size); MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS, uncomp_size); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILENAME_LEN_OFS, filename_size); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_EXTRA_LEN_OFS, extra_size); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_COMMENT_LEN_OFS, comment_size); MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS, ext_attributes); MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_LOCAL_HEADER_OFS, local_header_ofs); return MZ_TRUE; } static mz_bool mz_zip_writer_add_to_central_dir( mz_zip_archive *pZip, const char *pFilename, mz_uint16 filename_size, const void *pExtra, mz_uint16 extra_size, const void *pComment, mz_uint16 comment_size, mz_uint64 uncomp_size, mz_uint64 comp_size, mz_uint32 uncomp_crc32, mz_uint16 method, mz_uint16 bit_flags, mz_uint16 dos_time, mz_uint16 dos_date, mz_uint64 local_header_ofs, mz_uint32 ext_attributes) { mz_zip_internal_state *pState = pZip->m_pState; mz_uint32 central_dir_ofs = (mz_uint32)pState->m_central_dir.m_size; size_t orig_central_dir_size = pState->m_central_dir.m_size; mz_uint8 central_dir_header[MZ_ZIP_CENTRAL_DIR_HEADER_SIZE]; // No zip64 support yet if ((local_header_ofs > 0xFFFFFFFF) || (((mz_uint64)pState->m_central_dir.m_size + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + filename_size + extra_size + comment_size) > 0xFFFFFFFF)) return MZ_FALSE; if (!mz_zip_writer_create_central_dir_header( pZip, central_dir_header, filename_size, extra_size, comment_size, uncomp_size, comp_size, uncomp_crc32, method, bit_flags, dos_time, dos_date, local_header_ofs, ext_attributes)) return MZ_FALSE; if ((!mz_zip_array_push_back(pZip, &pState->m_central_dir, central_dir_header, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE)) || (!mz_zip_array_push_back(pZip, &pState->m_central_dir, pFilename, filename_size)) || (!mz_zip_array_push_back(pZip, &pState->m_central_dir, pExtra, extra_size)) || (!mz_zip_array_push_back(pZip, &pState->m_central_dir, pComment, comment_size)) || (!mz_zip_array_push_back(pZip, &pState->m_central_dir_offsets, &central_dir_ofs, 1))) { // Try to push the central directory array back into its original state. 
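/*
Illustrative caller-side guard matching the "no zip64" limits enforced
throughout the writer (at most 0xFFFF entries; no offset or size past
0xFFFFFFFF):

  if (zip.m_total_files == 0xFFFF || zip.m_archive_size >= 0xFFFFFFFF) {
    ... this archive is full for the zip64-less writer; start a new one ...
  }
*/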
mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size, MZ_FALSE); return MZ_FALSE; } return MZ_TRUE; } static mz_bool mz_zip_writer_validate_archive_name(const char *pArchive_name) { // Basic ZIP archive filename validity checks: Valid filenames cannot start // with a forward slash, cannot contain a drive letter, and cannot use // DOS-style backward slashes. if (*pArchive_name == '/') return MZ_FALSE; while (*pArchive_name) { if ((*pArchive_name == '\\') || (*pArchive_name == ':')) return MZ_FALSE; pArchive_name++; } return MZ_TRUE; } static mz_uint mz_zip_writer_compute_padding_needed_for_file_alignment( mz_zip_archive *pZip) { mz_uint32 n; if (!pZip->m_file_offset_alignment) return 0; n = (mz_uint32)(pZip->m_archive_size & (pZip->m_file_offset_alignment - 1)); return (pZip->m_file_offset_alignment - n) & (pZip->m_file_offset_alignment - 1); } static mz_bool mz_zip_writer_write_zeros(mz_zip_archive *pZip, mz_uint64 cur_file_ofs, mz_uint32 n) { char buf[4096]; memset(buf, 0, MZ_MIN(sizeof(buf), n)); while (n) { mz_uint32 s = MZ_MIN(sizeof(buf), n); if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_file_ofs, buf, s) != s) return MZ_FALSE; cur_file_ofs += s; n -= s; } return MZ_TRUE; } mz_bool mz_zip_writer_add_mem_ex(mz_zip_archive *pZip, const char *pArchive_name, const void *pBuf, size_t buf_size, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags, mz_uint64 uncomp_size, mz_uint32 uncomp_crc32) { mz_uint16 method = 0, dos_time = 0, dos_date = 0; mz_uint level, ext_attributes = 0, num_alignment_padding_bytes; mz_uint64 local_dir_header_ofs = pZip->m_archive_size, cur_archive_file_ofs = pZip->m_archive_size, comp_size = 0; size_t archive_name_size; mz_uint8 local_dir_header[MZ_ZIP_LOCAL_DIR_HEADER_SIZE]; tdefl_compressor *pComp = NULL; mz_bool store_data_uncompressed; mz_zip_internal_state *pState; if ((int)level_and_flags < 0) level_and_flags = MZ_DEFAULT_LEVEL; level = level_and_flags & 0xF; store_data_uncompressed = ((!level) || (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)); if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) || ((buf_size) && (!pBuf)) || (!pArchive_name) || ((comment_size) && (!pComment)) || (pZip->m_total_files == 0xFFFF) || (level > MZ_UBER_COMPRESSION)) return MZ_FALSE; pState = pZip->m_pState; if ((!(level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (uncomp_size)) return MZ_FALSE; // No zip64 support yet if ((buf_size > 0xFFFFFFFF) || (uncomp_size > 0xFFFFFFFF)) return MZ_FALSE; if (!mz_zip_writer_validate_archive_name(pArchive_name)) return MZ_FALSE; #ifndef MINIZ_NO_TIME { time_t cur_time; time(&cur_time); mz_zip_time_to_dos_time(cur_time, &dos_time, &dos_date); } #endif // #ifndef MINIZ_NO_TIME archive_name_size = strlen(pArchive_name); if (archive_name_size > 0xFFFF) return MZ_FALSE; num_alignment_padding_bytes = mz_zip_writer_compute_padding_needed_for_file_alignment(pZip); // no zip64 support yet if ((pZip->m_total_files == 0xFFFF) || ((pZip->m_archive_size + num_alignment_padding_bytes + MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + comment_size + archive_name_size) > 0xFFFFFFFF)) return MZ_FALSE; if ((archive_name_size) && (pArchive_name[archive_name_size - 1] == '/')) { // Set DOS Subdirectory attribute bit. ext_attributes |= 0x10; // Subdirectories cannot contain data. if ((buf_size) || (uncomp_size)) return MZ_FALSE; } // Try to do any allocations before writing to the archive, so if an // allocation fails the file remains unmodified. 
(A good idea if we're doing // an in-place modification.) if ((!mz_zip_array_ensure_room( pZip, &pState->m_central_dir, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + archive_name_size + comment_size)) || (!mz_zip_array_ensure_room(pZip, &pState->m_central_dir_offsets, 1))) return MZ_FALSE; if ((!store_data_uncompressed) && (buf_size)) { if (NULL == (pComp = (tdefl_compressor *)pZip->m_pAlloc( pZip->m_pAlloc_opaque, 1, sizeof(tdefl_compressor)))) return MZ_FALSE; } if (!mz_zip_writer_write_zeros( pZip, cur_archive_file_ofs, num_alignment_padding_bytes + sizeof(local_dir_header))) { pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); return MZ_FALSE; } local_dir_header_ofs += num_alignment_padding_bytes; if (pZip->m_file_offset_alignment) { MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) == 0); } cur_archive_file_ofs += num_alignment_padding_bytes + sizeof(local_dir_header); MZ_CLEAR_OBJ(local_dir_header); if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pArchive_name, archive_name_size) != archive_name_size) { pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); return MZ_FALSE; } cur_archive_file_ofs += archive_name_size; if (!(level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) { uncomp_crc32 = (mz_uint32)mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf, buf_size); uncomp_size = buf_size; if (uncomp_size <= 3) { level = 0; store_data_uncompressed = MZ_TRUE; } } if (store_data_uncompressed) { if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pBuf, buf_size) != buf_size) { pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); return MZ_FALSE; } cur_archive_file_ofs += buf_size; comp_size = buf_size; if (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA) method = MZ_DEFLATED; } else if (buf_size) { mz_zip_writer_add_state state; state.m_pZip = pZip; state.m_cur_archive_file_ofs = cur_archive_file_ofs; state.m_comp_size = 0; if ((tdefl_init(pComp, mz_zip_writer_add_put_buf_callback, &state, tdefl_create_comp_flags_from_zip_params( level, -15, MZ_DEFAULT_STRATEGY)) != TDEFL_STATUS_OKAY) || (tdefl_compress_buffer(pComp, pBuf, buf_size, TDEFL_FINISH) != TDEFL_STATUS_DONE)) { pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); return MZ_FALSE; } comp_size = state.m_comp_size; cur_archive_file_ofs = state.m_cur_archive_file_ofs; method = MZ_DEFLATED; } pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); pComp = NULL; // no zip64 support yet if ((comp_size > 0xFFFFFFFF) || (cur_archive_file_ofs > 0xFFFFFFFF)) return MZ_FALSE; if (!mz_zip_writer_create_local_dir_header( pZip, local_dir_header, (mz_uint16)archive_name_size, 0, uncomp_size, comp_size, uncomp_crc32, method, 0, dos_time, dos_date)) return MZ_FALSE; if (pZip->m_pWrite(pZip->m_pIO_opaque, local_dir_header_ofs, local_dir_header, sizeof(local_dir_header)) != sizeof(local_dir_header)) return MZ_FALSE; if (!mz_zip_writer_add_to_central_dir( pZip, pArchive_name, (mz_uint16)archive_name_size, NULL, 0, pComment, comment_size, uncomp_size, comp_size, uncomp_crc32, method, 0, dos_time, dos_date, local_dir_header_ofs, ext_attributes)) return MZ_FALSE; pZip->m_total_files++; pZip->m_archive_size = cur_archive_file_ofs; return MZ_TRUE; } #ifndef MINIZ_NO_STDIO mz_bool mz_zip_writer_add_file(mz_zip_archive *pZip, const char *pArchive_name, const char *pSrc_filename, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags) { mz_uint uncomp_crc32 = MZ_CRC32_INIT, level, num_alignment_padding_bytes; mz_uint16 method = 0, dos_time = 0, dos_date = 0, ext_attributes = 0; mz_uint64 local_dir_header_ofs = pZip->m_archive_size, cur_archive_file_ofs = 
pZip->m_archive_size, uncomp_size = 0, comp_size = 0; size_t archive_name_size; mz_uint8 local_dir_header[MZ_ZIP_LOCAL_DIR_HEADER_SIZE]; MZ_FILE *pSrc_file = NULL; if ((int)level_and_flags < 0) level_and_flags = MZ_DEFAULT_LEVEL; level = level_and_flags & 0xF; if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) || (!pArchive_name) || ((comment_size) && (!pComment)) || (level > MZ_UBER_COMPRESSION)) return MZ_FALSE; if (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA) return MZ_FALSE; if (!mz_zip_writer_validate_archive_name(pArchive_name)) return MZ_FALSE; archive_name_size = strlen(pArchive_name); if (archive_name_size > 0xFFFF) return MZ_FALSE; num_alignment_padding_bytes = mz_zip_writer_compute_padding_needed_for_file_alignment(pZip); // no zip64 support yet if ((pZip->m_total_files == 0xFFFF) || ((pZip->m_archive_size + num_alignment_padding_bytes + MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + comment_size + archive_name_size) > 0xFFFFFFFF)) return MZ_FALSE; if (!mz_zip_get_file_modified_time(pSrc_filename, &dos_time, &dos_date)) return MZ_FALSE; pSrc_file = MZ_FOPEN(pSrc_filename, "rb"); if (!pSrc_file) return MZ_FALSE; MZ_FSEEK64(pSrc_file, 0, SEEK_END); uncomp_size = MZ_FTELL64(pSrc_file); MZ_FSEEK64(pSrc_file, 0, SEEK_SET); if (uncomp_size > 0xFFFFFFFF) { // No zip64 support yet MZ_FCLOSE(pSrc_file); return MZ_FALSE; } if (uncomp_size <= 3) level = 0; if (!mz_zip_writer_write_zeros( pZip, cur_archive_file_ofs, num_alignment_padding_bytes + sizeof(local_dir_header))) { MZ_FCLOSE(pSrc_file); return MZ_FALSE; } local_dir_header_ofs += num_alignment_padding_bytes; if (pZip->m_file_offset_alignment) { MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) == 0); } cur_archive_file_ofs += num_alignment_padding_bytes + sizeof(local_dir_header); MZ_CLEAR_OBJ(local_dir_header); if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pArchive_name, archive_name_size) != archive_name_size) { MZ_FCLOSE(pSrc_file); return MZ_FALSE; } cur_archive_file_ofs += archive_name_size; if (uncomp_size) { mz_uint64 uncomp_remaining = uncomp_size; void *pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, MZ_ZIP_MAX_IO_BUF_SIZE); if (!pRead_buf) { MZ_FCLOSE(pSrc_file); return MZ_FALSE; } if (!level) { while (uncomp_remaining) { mz_uint n = (mz_uint)MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE, uncomp_remaining); if ((MZ_FREAD(pRead_buf, 1, n, pSrc_file) != n) || (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pRead_buf, n) != n)) { pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); MZ_FCLOSE(pSrc_file); return MZ_FALSE; } uncomp_crc32 = (mz_uint32)mz_crc32(uncomp_crc32, (const mz_uint8 *)pRead_buf, n); uncomp_remaining -= n; cur_archive_file_ofs += n; } comp_size = uncomp_size; } else { mz_bool result = MZ_FALSE; mz_zip_writer_add_state state; tdefl_compressor *pComp = (tdefl_compressor *)pZip->m_pAlloc( pZip->m_pAlloc_opaque, 1, sizeof(tdefl_compressor)); if (!pComp) { pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); MZ_FCLOSE(pSrc_file); return MZ_FALSE; } state.m_pZip = pZip; state.m_cur_archive_file_ofs = cur_archive_file_ofs; state.m_comp_size = 0; if (tdefl_init(pComp, mz_zip_writer_add_put_buf_callback, &state, tdefl_create_comp_flags_from_zip_params( level, -15, MZ_DEFAULT_STRATEGY)) != TDEFL_STATUS_OKAY) { pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); MZ_FCLOSE(pSrc_file); return MZ_FALSE; } for (;;) { size_t in_buf_size = (mz_uint32)MZ_MIN(uncomp_remaining, 
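/*
Usage sketch (illustrative) for this function, archiving a file from disk;
the source file's modification time is preserved via the DOS time fields:

  mz_zip_archive zip;
  memset(&zip, 0, sizeof(zip));
  mz_zip_writer_init_file(&zip, "out.zip", 0);
  mz_zip_writer_add_file(&zip, "docs/readme.txt", // name inside the archive
                         "README.txt",            // source path on disk
                         NULL, 0,                 // no per-file comment
                         MZ_BEST_SPEED);
  mz_zip_writer_finalize_archive(&zip);
  mz_zip_writer_end(&zip);
*/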
(mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE); tdefl_status status; if (MZ_FREAD(pRead_buf, 1, in_buf_size, pSrc_file) != in_buf_size) break; uncomp_crc32 = (mz_uint32)mz_crc32( uncomp_crc32, (const mz_uint8 *)pRead_buf, in_buf_size); uncomp_remaining -= in_buf_size; status = tdefl_compress_buffer( pComp, pRead_buf, in_buf_size, uncomp_remaining ? TDEFL_NO_FLUSH : TDEFL_FINISH); if (status == TDEFL_STATUS_DONE) { result = MZ_TRUE; break; } else if (status != TDEFL_STATUS_OKAY) break; } pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); if (!result) { pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); MZ_FCLOSE(pSrc_file); return MZ_FALSE; } comp_size = state.m_comp_size; cur_archive_file_ofs = state.m_cur_archive_file_ofs; method = MZ_DEFLATED; } pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); } MZ_FCLOSE(pSrc_file); pSrc_file = NULL; // no zip64 support yet if ((comp_size > 0xFFFFFFFF) || (cur_archive_file_ofs > 0xFFFFFFFF)) return MZ_FALSE; if (!mz_zip_writer_create_local_dir_header( pZip, local_dir_header, (mz_uint16)archive_name_size, 0, uncomp_size, comp_size, uncomp_crc32, method, 0, dos_time, dos_date)) return MZ_FALSE; if (pZip->m_pWrite(pZip->m_pIO_opaque, local_dir_header_ofs, local_dir_header, sizeof(local_dir_header)) != sizeof(local_dir_header)) return MZ_FALSE; if (!mz_zip_writer_add_to_central_dir( pZip, pArchive_name, (mz_uint16)archive_name_size, NULL, 0, pComment, comment_size, uncomp_size, comp_size, uncomp_crc32, method, 0, dos_time, dos_date, local_dir_header_ofs, ext_attributes)) return MZ_FALSE; pZip->m_total_files++; pZip->m_archive_size = cur_archive_file_ofs; return MZ_TRUE; } #endif // #ifndef MINIZ_NO_STDIO mz_bool mz_zip_writer_add_from_zip_reader(mz_zip_archive *pZip, mz_zip_archive *pSource_zip, mz_uint file_index) { mz_uint n, bit_flags, num_alignment_padding_bytes; mz_uint64 comp_bytes_remaining, local_dir_header_ofs; mz_uint64 cur_src_file_ofs, cur_dst_file_ofs; mz_uint32 local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) / sizeof(mz_uint32)]; mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32; mz_uint8 central_header[MZ_ZIP_CENTRAL_DIR_HEADER_SIZE]; size_t orig_central_dir_size; mz_zip_internal_state *pState; void *pBuf; const mz_uint8 *pSrc_central_header; if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING)) return MZ_FALSE; if (NULL == (pSrc_central_header = mz_zip_reader_get_cdh(pSource_zip, file_index))) return MZ_FALSE; pState = pZip->m_pState; num_alignment_padding_bytes = mz_zip_writer_compute_padding_needed_for_file_alignment(pZip); // no zip64 support yet if ((pZip->m_total_files == 0xFFFF) || ((pZip->m_archive_size + num_alignment_padding_bytes + MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE) > 0xFFFFFFFF)) return MZ_FALSE; cur_src_file_ofs = MZ_READ_LE32(pSrc_central_header + MZ_ZIP_CDH_LOCAL_HEADER_OFS); cur_dst_file_ofs = pZip->m_archive_size; if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, pLocal_header, MZ_ZIP_LOCAL_DIR_HEADER_SIZE) != MZ_ZIP_LOCAL_DIR_HEADER_SIZE) return MZ_FALSE; if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG) return MZ_FALSE; cur_src_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE; if (!mz_zip_writer_write_zeros(pZip, cur_dst_file_ofs, num_alignment_padding_bytes)) return MZ_FALSE; cur_dst_file_ofs += num_alignment_padding_bytes; local_dir_header_ofs = cur_dst_file_ofs; if (pZip->m_file_offset_alignment) { MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) == 0); } if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, 
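/*
Usage sketch (illustrative) for mz_zip_writer_add_from_zip_reader(): copying
an entry between archives without recompression. The raw local header, the
compressed payload, and (when bit 3 of the entry's flags is set) the
trailing data descriptor are block-copied; only the local-header offset in
the copied central directory record is patched.

  int i = mz_zip_reader_locate_file(&src, "asset.bin", NULL, 0);
  if (i >= 0)
    mz_zip_writer_add_from_zip_reader(&dst, &src, (mz_uint)i);
*/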
pLocal_header, MZ_ZIP_LOCAL_DIR_HEADER_SIZE) != MZ_ZIP_LOCAL_DIR_HEADER_SIZE) return MZ_FALSE; cur_dst_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE; n = MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) + MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS); comp_bytes_remaining = n + MZ_READ_LE32(pSrc_central_header + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS); if (NULL == (pBuf = pZip->m_pAlloc( pZip->m_pAlloc_opaque, 1, (size_t)MZ_MAX(sizeof(mz_uint32) * 4, MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE, comp_bytes_remaining))))) return MZ_FALSE; while (comp_bytes_remaining) { n = (mz_uint)MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE, comp_bytes_remaining); if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, pBuf, n) != n) { pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf); return MZ_FALSE; } cur_src_file_ofs += n; if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pBuf, n) != n) { pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf); return MZ_FALSE; } cur_dst_file_ofs += n; comp_bytes_remaining -= n; } bit_flags = MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_BIT_FLAG_OFS); if (bit_flags & 8) { // Copy data descriptor if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, pBuf, sizeof(mz_uint32) * 4) != sizeof(mz_uint32) * 4) { pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf); return MZ_FALSE; } n = sizeof(mz_uint32) * ((MZ_READ_LE32(pBuf) == 0x08074b50) ? 4 : 3); if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pBuf, n) != n) { pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf); return MZ_FALSE; } cur_src_file_ofs += n; cur_dst_file_ofs += n; } pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf); // no zip64 support yet if (cur_dst_file_ofs > 0xFFFFFFFF) return MZ_FALSE; orig_central_dir_size = pState->m_central_dir.m_size; memcpy(central_header, pSrc_central_header, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE); MZ_WRITE_LE32(central_header + MZ_ZIP_CDH_LOCAL_HEADER_OFS, local_dir_header_ofs); if (!mz_zip_array_push_back(pZip, &pState->m_central_dir, central_header, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE)) return MZ_FALSE; n = MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_FILENAME_LEN_OFS) + MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_EXTRA_LEN_OFS) + MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_COMMENT_LEN_OFS); if (!mz_zip_array_push_back( pZip, &pState->m_central_dir, pSrc_central_header + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n)) { mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size, MZ_FALSE); return MZ_FALSE; } if (pState->m_central_dir.m_size > 0xFFFFFFFF) return MZ_FALSE; n = (mz_uint32)orig_central_dir_size; if (!mz_zip_array_push_back(pZip, &pState->m_central_dir_offsets, &n, 1)) { mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size, MZ_FALSE); return MZ_FALSE; } pZip->m_total_files++; pZip->m_archive_size = cur_dst_file_ofs; return MZ_TRUE; } mz_bool mz_zip_writer_finalize_archive(mz_zip_archive *pZip) { mz_zip_internal_state *pState; mz_uint64 central_dir_ofs, central_dir_size; mz_uint8 hdr[MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE]; if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING)) return MZ_FALSE; pState = pZip->m_pState; // no zip64 support yet if ((pZip->m_total_files > 0xFFFF) || ((pZip->m_archive_size + pState->m_central_dir.m_size + MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) > 0xFFFFFFFF)) return MZ_FALSE; central_dir_ofs = 0; central_dir_size = 0; if (pZip->m_total_files) { // Write central directory central_dir_ofs = pZip->m_archive_size; central_dir_size = pState->m_central_dir.m_size; pZip->m_central_directory_file_ofs = central_dir_ofs; if 
(pZip->m_pWrite(pZip->m_pIO_opaque, central_dir_ofs, pState->m_central_dir.m_p, (size_t)central_dir_size) != central_dir_size) return MZ_FALSE; pZip->m_archive_size += central_dir_size; } // Write end of central directory record MZ_CLEAR_OBJ(hdr); MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_SIG_OFS, MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG); MZ_WRITE_LE16(hdr + MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS, pZip->m_total_files); MZ_WRITE_LE16(hdr + MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS, pZip->m_total_files); MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_CDIR_SIZE_OFS, central_dir_size); MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_CDIR_OFS_OFS, central_dir_ofs); if (pZip->m_pWrite(pZip->m_pIO_opaque, pZip->m_archive_size, hdr, sizeof(hdr)) != sizeof(hdr)) return MZ_FALSE; #ifndef MINIZ_NO_STDIO if ((pState->m_pFile) && (MZ_FFLUSH(pState->m_pFile) == EOF)) return MZ_FALSE; #endif // #ifndef MINIZ_NO_STDIO pZip->m_archive_size += sizeof(hdr); pZip->m_zip_mode = MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED; return MZ_TRUE; } mz_bool mz_zip_writer_finalize_heap_archive(mz_zip_archive *pZip, void **pBuf, size_t *pSize) { if ((!pZip) || (!pZip->m_pState) || (!pBuf) || (!pSize)) return MZ_FALSE; if (pZip->m_pWrite != mz_zip_heap_write_func) return MZ_FALSE; if (!mz_zip_writer_finalize_archive(pZip)) return MZ_FALSE; *pBuf = pZip->m_pState->m_pMem; *pSize = pZip->m_pState->m_mem_size; pZip->m_pState->m_pMem = NULL; pZip->m_pState->m_mem_size = pZip->m_pState->m_mem_capacity = 0; return MZ_TRUE; } mz_bool mz_zip_writer_end(mz_zip_archive *pZip) { mz_zip_internal_state *pState; mz_bool status = MZ_TRUE; if ((!pZip) || (!pZip->m_pState) || (!pZip->m_pAlloc) || (!pZip->m_pFree) || ((pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) && (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED))) return MZ_FALSE; pState = pZip->m_pState; pZip->m_pState = NULL; mz_zip_array_clear(pZip, &pState->m_central_dir); mz_zip_array_clear(pZip, &pState->m_central_dir_offsets); mz_zip_array_clear(pZip, &pState->m_sorted_central_dir_offsets); #ifndef MINIZ_NO_STDIO if (pState->m_pFile) { MZ_FCLOSE(pState->m_pFile); pState->m_pFile = NULL; } #endif // #ifndef MINIZ_NO_STDIO if ((pZip->m_pWrite == mz_zip_heap_write_func) && (pState->m_pMem)) { pZip->m_pFree(pZip->m_pAlloc_opaque, pState->m_pMem); pState->m_pMem = NULL; } pZip->m_pFree(pZip->m_pAlloc_opaque, pState); pZip->m_zip_mode = MZ_ZIP_MODE_INVALID; return status; } #ifndef MINIZ_NO_STDIO mz_bool mz_zip_add_mem_to_archive_file_in_place( const char *pZip_filename, const char *pArchive_name, const void *pBuf, size_t buf_size, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags) { mz_bool status, created_new_archive = MZ_FALSE; mz_zip_archive zip_archive; struct MZ_FILE_STAT_STRUCT file_stat; MZ_CLEAR_OBJ(zip_archive); if ((int)level_and_flags < 0) level_and_flags = MZ_DEFAULT_LEVEL; if ((!pZip_filename) || (!pArchive_name) || ((buf_size) && (!pBuf)) || ((comment_size) && (!pComment)) || ((level_and_flags & 0xF) > MZ_UBER_COMPRESSION)) return MZ_FALSE; if (!mz_zip_writer_validate_archive_name(pArchive_name)) return MZ_FALSE; if (MZ_FILE_STAT(pZip_filename, &file_stat) != 0) { // Create a new archive. if (!mz_zip_writer_init_file(&zip_archive, pZip_filename, 0)) return MZ_FALSE; created_new_archive = MZ_TRUE; } else { // Append to an existing archive. 
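    // (A note on the append path: mz_zip_writer_init_from_reader() below
    // switches the freshly opened reader into writing state; the updated
    // central directory is then rewritten when
    // mz_zip_writer_finalize_archive() runs.)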
if (!mz_zip_reader_init_file( &zip_archive, pZip_filename, level_and_flags | MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY)) return MZ_FALSE; if (!mz_zip_writer_init_from_reader(&zip_archive, pZip_filename)) { mz_zip_reader_end(&zip_archive); return MZ_FALSE; } } status = mz_zip_writer_add_mem_ex(&zip_archive, pArchive_name, pBuf, buf_size, pComment, comment_size, level_and_flags, 0, 0); // Always finalize, even if adding failed for some reason, so we have a valid // central directory. (This may not always succeed, but we can try.) if (!mz_zip_writer_finalize_archive(&zip_archive)) status = MZ_FALSE; if (!mz_zip_writer_end(&zip_archive)) status = MZ_FALSE; if ((!status) && (created_new_archive)) { // It's a new archive and something went wrong, so just delete it. int ignoredStatus = MZ_DELETE_FILE(pZip_filename); (void)ignoredStatus; } return status; } void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename, const char *pArchive_name, size_t *pSize, mz_uint flags) { int file_index; mz_zip_archive zip_archive; void *p = NULL; if (pSize) *pSize = 0; if ((!pZip_filename) || (!pArchive_name)) return NULL; MZ_CLEAR_OBJ(zip_archive); if (!mz_zip_reader_init_file( &zip_archive, pZip_filename, flags | MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY)) return NULL; if ((file_index = mz_zip_reader_locate_file(&zip_archive, pArchive_name, NULL, flags)) >= 0) p = mz_zip_reader_extract_to_heap(&zip_archive, file_index, pSize, flags); mz_zip_reader_end(&zip_archive); return p; } #endif // #ifndef MINIZ_NO_STDIO #endif // #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS #endif // #ifndef MINIZ_NO_ARCHIVE_APIS #ifdef __cplusplus } #endif #ifdef _MSC_VER #pragma warning(pop) #endif #endif // MINIZ_HEADER_FILE_ONLY /* This is free and unencumbered software released into the public domain. Anyone is free to copy, modify, publish, use, compile, sell, or distribute this software, either in source code form or as a compiled binary, for any purpose, commercial or non-commercial, and by any means. In jurisdictions that recognize copyright laws, the author or authors of this software dedicate any and all copyright interest in the software to the public domain. We make this dedication for the benefit of the public at large and to the detriment of our heirs and successors. We intend this dedication to be an overt act of relinquishment in perpetuity of all present and future rights to this software under copyright law. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. For more information, please refer to <http://unlicense.org/> */ // ---------------------- end of miniz ---------------------------------------- #ifdef __clang__ #pragma clang diagnostic pop #endif } // namespace miniz #else // Reuse MINIZ_LITTE_ENDIAN macro #if defined(__sparcv9) // Big endian #else #if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || MINIZ_X86_OR_X64_CPU // Set MINIZ_LITTLE_ENDIAN to 1 if the processor is little endian. 
#define MINIZ_LITTLE_ENDIAN 1 #endif #endif #endif // TINYEXR_USE_MINIZ // static bool IsBigEndian(void) { // union { // unsigned int i; // char c[4]; // } bint = {0x01020304}; // // return bint.c[0] == 1; //} static void SetErrorMessage(const std::string &msg, const char **err) { if (err) { #ifdef _WIN32 (*err) = _strdup(msg.c_str()); #else (*err) = strdup(msg.c_str()); #endif } } static const int kEXRVersionSize = 8; static void cpy2(unsigned short *dst_val, const unsigned short *src_val) { unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val); const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val); dst[0] = src[0]; dst[1] = src[1]; } static void swap2(unsigned short *val) { #ifdef MINIZ_LITTLE_ENDIAN (void)val; #else unsigned short tmp = *val; unsigned char *dst = reinterpret_cast<unsigned char *>(val); unsigned char *src = reinterpret_cast<unsigned char *>(&tmp); dst[0] = src[1]; dst[1] = src[0]; #endif } #ifdef __clang__ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wunused-function" #endif #ifdef __GNUC__ #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-function" #endif static void cpy4(int *dst_val, const int *src_val) { unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val); const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val); dst[0] = src[0]; dst[1] = src[1]; dst[2] = src[2]; dst[3] = src[3]; } static void cpy4(unsigned int *dst_val, const unsigned int *src_val) { unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val); const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val); dst[0] = src[0]; dst[1] = src[1]; dst[2] = src[2]; dst[3] = src[3]; } static void cpy4(float *dst_val, const float *src_val) { unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val); const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val); dst[0] = src[0]; dst[1] = src[1]; dst[2] = src[2]; dst[3] = src[3]; } #ifdef __clang__ #pragma clang diagnostic pop #endif #ifdef __GNUC__ #pragma GCC diagnostic pop #endif static void swap4(unsigned int *val) { #ifdef MINIZ_LITTLE_ENDIAN (void)val; #else unsigned int tmp = *val; unsigned char *dst = reinterpret_cast<unsigned char *>(val); unsigned char *src = reinterpret_cast<unsigned char *>(&tmp); dst[0] = src[3]; dst[1] = src[2]; dst[2] = src[1]; dst[3] = src[0]; #endif } #if 0 static void cpy8(tinyexr::tinyexr_uint64 *dst_val, const tinyexr::tinyexr_uint64 *src_val) { unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val); const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val); dst[0] = src[0]; dst[1] = src[1]; dst[2] = src[2]; dst[3] = src[3]; dst[4] = src[4]; dst[5] = src[5]; dst[6] = src[6]; dst[7] = src[7]; } #endif static void swap8(tinyexr::tinyexr_uint64 *val) { #ifdef MINIZ_LITTLE_ENDIAN (void)val; #else tinyexr::tinyexr_uint64 tmp = (*val); unsigned char *dst = reinterpret_cast<unsigned char *>(val); unsigned char *src = reinterpret_cast<unsigned char *>(&tmp); dst[0] = src[7]; dst[1] = src[6]; dst[2] = src[5]; dst[3] = src[4]; dst[4] = src[3]; dst[5] = src[2]; dst[6] = src[1]; dst[7] = src[0]; #endif } // https://gist.github.com/rygorous/2156668 // Reuse MINIZ_LITTLE_ENDIAN flag from miniz. 
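// The FP32/FP16 unions below overlay the IEEE-754 bit fields of a 32-bit
// float and a 16-bit half so that half_to_float()/float_to_half_full() can
// manipulate sign/exponent/mantissa directly. A minimal usage sketch
// (illustrative only, kept out of the build; ExampleHalfRoundTrip is not
// part of tinyexr):
#if 0
static unsigned short ExampleHalfRoundTrip(float value) {
  FP32 f;
  f.f = value;
  FP16 h = float_to_half_full(f);  // narrow: 1/8/23 bits -> 1/5/10 bits
  FP32 back = half_to_float(h);    // widen again (lossy beyond 11
                                   // significant mantissa bits)
  (void)back;
  return h.u;  // raw half bit pattern, as stored in EXR files
}
#endif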
union FP32 {
  unsigned int u;
  float f;
  struct {
#if MINIZ_LITTLE_ENDIAN
    unsigned int Mantissa : 23;
    unsigned int Exponent : 8;
    unsigned int Sign : 1;
#else
    unsigned int Sign : 1;
    unsigned int Exponent : 8;
    unsigned int Mantissa : 23;
#endif
  } s;
};

#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wpadded"
#endif

union FP16 {
  unsigned short u;
  struct {
#if MINIZ_LITTLE_ENDIAN
    unsigned int Mantissa : 10;
    unsigned int Exponent : 5;
    unsigned int Sign : 1;
#else
    unsigned int Sign : 1;
    unsigned int Exponent : 5;
    unsigned int Mantissa : 10;
#endif
  } s;
};

#ifdef __clang__
#pragma clang diagnostic pop
#endif

static FP32 half_to_float(FP16 h) {
  static const FP32 magic = {113 << 23};
  static const unsigned int shifted_exp = 0x7c00
                                          << 13;  // exponent mask after shift
  FP32 o;

  o.u = (h.u & 0x7fffU) << 13U;           // exponent/mantissa bits
  unsigned int exp_ = shifted_exp & o.u;  // just the exponent
  o.u += (127 - 15) << 23;                // exponent adjust

  // handle exponent special cases
  if (exp_ == shifted_exp)    // Inf/NaN?
    o.u += (128 - 16) << 23;  // extra exp adjust
  else if (exp_ == 0)         // Zero/Denormal?
  {
    o.u += 1 << 23;  // extra exp adjust
    o.f -= magic.f;  // renormalize
  }

  o.u |= (h.u & 0x8000U) << 16U;  // sign bit
  return o;
}

static FP16 float_to_half_full(FP32 f) {
  FP16 o = {0};

  // Based on ISPC reference code (with minor modifications)
  if (f.s.Exponent == 0)  // Signed zero/denormal (which will underflow)
    o.s.Exponent = 0;
  else if (f.s.Exponent == 255)  // Inf or NaN (all exponent bits set)
  {
    o.s.Exponent = 31;
    o.s.Mantissa = f.s.Mantissa ? 0x200 : 0;  // NaN->qNaN and Inf->Inf
  } else                                      // Normalized number
  {
    // Un-bias the single-precision exponent, then re-bias it for half
    int newexp = f.s.Exponent - 127 + 15;
    if (newexp >= 31)  // Overflow, return signed infinity
      o.s.Exponent = 31;
    else if (newexp <= 0)  // Underflow
    {
      if ((14 - newexp) <= 24)  // Mantissa might be non-zero
      {
        unsigned int mant = f.s.Mantissa | 0x800000;  // Hidden 1 bit
        o.s.Mantissa = mant >> (14 - newexp);
        if ((mant >> (13 - newexp)) & 1)  // Check for rounding
          o.u++;  // Round, might overflow into exp bit, but this is OK
      }
    } else {
      o.s.Exponent = static_cast<unsigned int>(newexp);
      o.s.Mantissa = f.s.Mantissa >> 13;
      if (f.s.Mantissa & 0x1000)  // Check for rounding
        o.u++;  // Round, might overflow to inf, this is OK
    }
  }

  o.s.Sign = f.s.Sign;
  return o;
}

// NOTE: From OpenEXR code
// #define IMF_INCREASING_Y 0
// #define IMF_DECREASING_Y 1
// #define IMF_RANDOM_Y 2
//
// #define IMF_NO_COMPRESSION 0
// #define IMF_RLE_COMPRESSION 1
// #define IMF_ZIPS_COMPRESSION 2
// #define IMF_ZIP_COMPRESSION 3
// #define IMF_PIZ_COMPRESSION 4
// #define IMF_PXR24_COMPRESSION 5
// #define IMF_B44_COMPRESSION 6
// #define IMF_B44A_COMPRESSION 7

#ifdef __clang__
#pragma clang diagnostic push
#if __has_warning("-Wzero-as-null-pointer-constant")
#pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant"
#endif
#endif

static const char *ReadString(std::string *s, const char *ptr, size_t len) {
  // Read until NULL (\0).
  const char *p = ptr;
  const char *q = ptr;
  while ((size_t(q - ptr) < len) && (*q) != 0) {
    q++;
  }

  if (size_t(q - ptr) >= len) {
    (*s) = std::string();
    return NULL;
  }

  (*s) = std::string(p, q);

  return q + 1;  // skip '\0'
}

static bool ReadAttribute(std::string *name, std::string *type,
                          std::vector<unsigned char> *data, size_t *marker_size,
                          const char *marker, size_t size) {
  size_t name_len = strnlen(marker, size);
  if (name_len == size) {
    // String does not have a terminating character.
return false; } *name = std::string(marker, name_len); marker += name_len + 1; size -= name_len + 1; size_t type_len = strnlen(marker, size); if (type_len == size) { return false; } *type = std::string(marker, type_len); marker += type_len + 1; size -= type_len + 1; if (size < sizeof(uint32_t)) { return false; } uint32_t data_len; memcpy(&data_len, marker, sizeof(uint32_t)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len)); if (data_len == 0) { if ((*type).compare("string") == 0) { // Accept empty string attribute. marker += sizeof(uint32_t); size -= sizeof(uint32_t); *marker_size = name_len + 1 + type_len + 1 + sizeof(uint32_t); data->resize(1); (*data)[0] = '\0'; return true; } else { return false; } } marker += sizeof(uint32_t); size -= sizeof(uint32_t); if (size < data_len) { return false; } data->resize(static_cast<size_t>(data_len)); memcpy(&data->at(0), marker, static_cast<size_t>(data_len)); *marker_size = name_len + 1 + type_len + 1 + sizeof(uint32_t) + data_len; return true; } static void WriteAttributeToMemory(std::vector<unsigned char> *out, const char *name, const char *type, const unsigned char *data, int len) { out->insert(out->end(), name, name + strlen(name) + 1); out->insert(out->end(), type, type + strlen(type) + 1); int outLen = len; tinyexr::swap4(reinterpret_cast<unsigned int *>(&outLen)); out->insert(out->end(), reinterpret_cast<unsigned char *>(&outLen), reinterpret_cast<unsigned char *>(&outLen) + sizeof(int)); out->insert(out->end(), data, data + len); } typedef struct ChannelInfo { std::string name; // less than 255 bytes long int pixel_type; int x_sampling; int y_sampling; unsigned char p_linear; unsigned char pad[3]; } ChannelInfo; typedef struct HeaderInfo { std::vector<tinyexr::ChannelInfo> channels; std::vector<EXRAttribute> attributes; int data_window[4]; int line_order; int display_window[4]; float screen_window_center[2]; float screen_window_width; float pixel_aspect_ratio; int chunk_count; // Tiled format int tile_size_x; int tile_size_y; int tile_level_mode; int tile_rounding_mode; unsigned int header_len; int compression_type; void clear() { channels.clear(); attributes.clear(); data_window[0] = 0; data_window[1] = 0; data_window[2] = 0; data_window[3] = 0; line_order = 0; display_window[0] = 0; display_window[1] = 0; display_window[2] = 0; display_window[3] = 0; screen_window_center[0] = 0.0f; screen_window_center[1] = 0.0f; screen_window_width = 0.0f; pixel_aspect_ratio = 0.0f; chunk_count = 0; // Tiled format tile_size_x = 0; tile_size_y = 0; tile_level_mode = 0; tile_rounding_mode = 0; header_len = 0; compression_type = 0; } } HeaderInfo; static bool ReadChannelInfo(std::vector<ChannelInfo> &channels, const std::vector<unsigned char> &data) { const char *p = reinterpret_cast<const char *>(&data.at(0)); for (;;) { if ((*p) == 0) { break; } ChannelInfo info; tinyexr_int64 data_len = static_cast<tinyexr_int64>(data.size()) - (p - reinterpret_cast<const char *>(data.data())); if (data_len < 0) { return false; } p = ReadString(&info.name, p, size_t(data_len)); if ((p == NULL) && (info.name.empty())) { // Buffer overrun. Issue #51. 
return false; } const unsigned char *data_end = reinterpret_cast<const unsigned char *>(p) + 16; if (data_end >= (data.data() + data.size())) { return false; } memcpy(&info.pixel_type, p, sizeof(int)); p += 4; info.p_linear = static_cast<unsigned char>(p[0]); // uchar p += 1 + 3; // reserved: uchar[3] memcpy(&info.x_sampling, p, sizeof(int)); // int p += 4; memcpy(&info.y_sampling, p, sizeof(int)); // int p += 4; tinyexr::swap4(reinterpret_cast<unsigned int *>(&info.pixel_type)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&info.x_sampling)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&info.y_sampling)); channels.push_back(info); } return true; } static void WriteChannelInfo(std::vector<unsigned char> &data, const std::vector<ChannelInfo> &channels) { size_t sz = 0; // Calculate total size. for (size_t c = 0; c < channels.size(); c++) { sz += strlen(channels[c].name.c_str()) + 1; // +1 for \0 sz += 16; // 4 * int } data.resize(sz + 1); unsigned char *p = &data.at(0); for (size_t c = 0; c < channels.size(); c++) { memcpy(p, channels[c].name.c_str(), strlen(channels[c].name.c_str())); p += strlen(channels[c].name.c_str()); (*p) = '\0'; p++; int pixel_type = channels[c].pixel_type; int x_sampling = channels[c].x_sampling; int y_sampling = channels[c].y_sampling; tinyexr::swap4(reinterpret_cast<unsigned int *>(&pixel_type)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&x_sampling)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&y_sampling)); memcpy(p, &pixel_type, sizeof(int)); p += sizeof(int); (*p) = channels[c].p_linear; p += 4; memcpy(p, &x_sampling, sizeof(int)); p += sizeof(int); memcpy(p, &y_sampling, sizeof(int)); p += sizeof(int); } (*p) = '\0'; } static void CompressZip(unsigned char *dst, tinyexr::tinyexr_uint64 &compressedSize, const unsigned char *src, unsigned long src_size) { std::vector<unsigned char> tmpBuf(src_size); // // Apply EXR-specific? postprocess. Grabbed from OpenEXR's // ImfZipCompressor.cpp // // // Reorder the pixel data. // const char *srcPtr = reinterpret_cast<const char *>(src); { char *t1 = reinterpret_cast<char *>(&tmpBuf.at(0)); char *t2 = reinterpret_cast<char *>(&tmpBuf.at(0)) + (src_size + 1) / 2; const char *stop = srcPtr + src_size; for (;;) { if (srcPtr < stop) *(t1++) = *(srcPtr++); else break; if (srcPtr < stop) *(t2++) = *(srcPtr++); else break; } } // // Predictor. // { unsigned char *t = &tmpBuf.at(0) + 1; unsigned char *stop = &tmpBuf.at(0) + src_size; int p = t[-1]; while (t < stop) { int d = int(t[0]) - p + (128 + 256); p = t[0]; t[0] = static_cast<unsigned char>(d); ++t; } } #if TINYEXR_USE_MINIZ // // Compress the data using miniz // miniz::mz_ulong outSize = miniz::mz_compressBound(src_size); int ret = miniz::mz_compress( dst, &outSize, static_cast<const unsigned char *>(&tmpBuf.at(0)), src_size); assert(ret == miniz::MZ_OK); (void)ret; compressedSize = outSize; #else uLong outSize = compressBound(static_cast<uLong>(src_size)); int ret = compress(dst, &outSize, static_cast<const Bytef *>(&tmpBuf.at(0)), src_size); assert(ret == Z_OK); compressedSize = outSize; #endif // Use uncompressed data when compressed data is larger than uncompressed. // (Issue 40) if (compressedSize >= src_size) { compressedSize = src_size; memcpy(dst, src, src_size); } } static bool DecompressZip(unsigned char *dst, unsigned long *uncompressed_size /* inout */, const unsigned char *src, unsigned long src_size) { if ((*uncompressed_size) == src_size) { // Data is not compressed(Issue 40). 
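    // (CompressZip falls back to storing the input verbatim whenever the
    // deflated result would be larger, so equal sizes mean "stored".)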
    memcpy(dst, src, src_size);
    return true;
  }

  std::vector<unsigned char> tmpBuf(*uncompressed_size);

#if TINYEXR_USE_MINIZ
  int ret =
      miniz::mz_uncompress(&tmpBuf.at(0), uncompressed_size, src, src_size);
  if (miniz::MZ_OK != ret) {
    return false;
  }
#else
  int ret = uncompress(&tmpBuf.at(0), uncompressed_size, src, src_size);
  if (Z_OK != ret) {
    return false;
  }
#endif

  //
  // Apply EXR-specific? postprocess. Grabbed from OpenEXR's
  // ImfZipCompressor.cpp
  //

  // Predictor.
  {
    unsigned char *t = &tmpBuf.at(0) + 1;
    unsigned char *stop = &tmpBuf.at(0) + (*uncompressed_size);

    while (t < stop) {
      int d = int(t[-1]) + int(t[0]) - 128;
      t[0] = static_cast<unsigned char>(d);
      ++t;
    }
  }

  // Reorder the pixel data.
  {
    const char *t1 = reinterpret_cast<const char *>(&tmpBuf.at(0));
    const char *t2 = reinterpret_cast<const char *>(&tmpBuf.at(0)) +
                     (*uncompressed_size + 1) / 2;
    char *s = reinterpret_cast<char *>(dst);
    char *stop = s + (*uncompressed_size);

    for (;;) {
      if (s < stop)
        *(s++) = *(t1++);
      else
        break;

      if (s < stop)
        *(s++) = *(t2++);
      else
        break;
    }
  }

  return true;
}

// RLE code from OpenEXR --------------------------------------

#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wsign-conversion"
#if __has_warning("-Wextra-semi-stmt")
#pragma clang diagnostic ignored "-Wextra-semi-stmt"
#endif
#endif

#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable : 4204)  // nonstandard extension used : non-constant
                                 // aggregate initializer (also supported by GNU
                                 // C and C99, so no big deal)
#pragma warning(disable : 4244)  // 'initializing': conversion from '__int64' to
                                 // 'int', possible loss of data
#pragma warning(disable : 4267)  // 'argument': conversion from '__int64' to
                                 // 'int', possible loss of data
#pragma warning(disable : 4996)  // 'strdup': The POSIX name for this item is
                                 // deprecated. Instead, use the ISO C and C++
                                 // conformant name: _strdup.
#endif

const int MIN_RUN_LENGTH = 3;
const int MAX_RUN_LENGTH = 127;

//
// Compress an array of bytes, using run-length encoding,
// and return the length of the compressed data.
//

static int rleCompress(int inLength, const char in[], signed char out[]) {
  const char *inEnd = in + inLength;
  const char *runStart = in;
  const char *runEnd = in + 1;
  signed char *outWrite = out;

  while (runStart < inEnd) {
    while (runEnd < inEnd && *runStart == *runEnd &&
           runEnd - runStart - 1 < MAX_RUN_LENGTH) {
      ++runEnd;
    }

    if (runEnd - runStart >= MIN_RUN_LENGTH) {
      //
      // Compressible run
      //

      // (parenthesized so the subtraction happens before narrowing to char)
      *outWrite++ = static_cast<char>((runEnd - runStart) - 1);
      *outWrite++ = *(reinterpret_cast<const signed char *>(runStart));
      runStart = runEnd;
    } else {
      //
      // Incompressible run
      //

      while (runEnd < inEnd &&
             ((runEnd + 1 >= inEnd || *runEnd != *(runEnd + 1)) ||
              (runEnd + 2 >= inEnd || *(runEnd + 1) != *(runEnd + 2))) &&
             runEnd - runStart < MAX_RUN_LENGTH) {
        ++runEnd;
      }

      *outWrite++ = static_cast<char>(runStart - runEnd);

      while (runStart < runEnd) {
        *outWrite++ = *(reinterpret_cast<const signed char *>(runStart++));
      }
    }

    ++runEnd;
  }

  return static_cast<int>(outWrite - out);
}

//
// Uncompress an array of bytes compressed with rleCompress().
// Returns the length of the uncompressed data, or 0 if the
// length of the uncompressed data would be more than maxLength.
//

static int rleUncompress(int inLength, int maxLength, const signed char in[],
                         char out[]) {
  char *outStart = out;

  while (inLength > 0) {
    if (*in < 0) {
      int count = -(static_cast<int>(*in++));
      inLength -= count + 1;

      // Fixes #116: Add bounds check to in buffer.
if ((0 > (maxLength -= count)) || (inLength < 0)) return 0; memcpy(out, in, count); out += count; in += count; } else { int count = *in++; inLength -= 2; if (0 > (maxLength -= count + 1)) return 0; memset(out, *reinterpret_cast<const char *>(in), count + 1); out += count + 1; in++; } } return static_cast<int>(out - outStart); } #ifdef __clang__ #pragma clang diagnostic pop #endif // End of RLE code from OpenEXR ----------------------------------- static void CompressRle(unsigned char *dst, tinyexr::tinyexr_uint64 &compressedSize, const unsigned char *src, unsigned long src_size) { std::vector<unsigned char> tmpBuf(src_size); // // Apply EXR-specific? postprocess. Grabbed from OpenEXR's // ImfRleCompressor.cpp // // // Reorder the pixel data. // const char *srcPtr = reinterpret_cast<const char *>(src); { char *t1 = reinterpret_cast<char *>(&tmpBuf.at(0)); char *t2 = reinterpret_cast<char *>(&tmpBuf.at(0)) + (src_size + 1) / 2; const char *stop = srcPtr + src_size; for (;;) { if (srcPtr < stop) *(t1++) = *(srcPtr++); else break; if (srcPtr < stop) *(t2++) = *(srcPtr++); else break; } } // // Predictor. // { unsigned char *t = &tmpBuf.at(0) + 1; unsigned char *stop = &tmpBuf.at(0) + src_size; int p = t[-1]; while (t < stop) { int d = int(t[0]) - p + (128 + 256); p = t[0]; t[0] = static_cast<unsigned char>(d); ++t; } } // outSize will be (srcSiz * 3) / 2 at max. int outSize = rleCompress(static_cast<int>(src_size), reinterpret_cast<const char *>(&tmpBuf.at(0)), reinterpret_cast<signed char *>(dst)); assert(outSize > 0); compressedSize = static_cast<tinyexr::tinyexr_uint64>(outSize); // Use uncompressed data when compressed data is larger than uncompressed. // (Issue 40) if (compressedSize >= src_size) { compressedSize = src_size; memcpy(dst, src, src_size); } } static bool DecompressRle(unsigned char *dst, const unsigned long uncompressed_size, const unsigned char *src, unsigned long src_size) { if (uncompressed_size == src_size) { // Data is not compressed(Issue 40). memcpy(dst, src, src_size); return true; } // Workaround for issue #112. // TODO(syoyo): Add more robust out-of-bounds check in `rleUncompress`. if (src_size <= 2) { return false; } std::vector<unsigned char> tmpBuf(uncompressed_size); int ret = rleUncompress(static_cast<int>(src_size), static_cast<int>(uncompressed_size), reinterpret_cast<const signed char *>(src), reinterpret_cast<char *>(&tmpBuf.at(0))); if (ret != static_cast<int>(uncompressed_size)) { return false; } // // Apply EXR-specific? postprocess. Grabbed from OpenEXR's // ImfRleCompressor.cpp // // Predictor. { unsigned char *t = &tmpBuf.at(0) + 1; unsigned char *stop = &tmpBuf.at(0) + uncompressed_size; while (t < stop) { int d = int(t[-1]) + int(t[0]) - 128; t[0] = static_cast<unsigned char>(d); ++t; } } // Reorder the pixel data. 
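  // CompressRle placed even-indexed bytes in the first half of the buffer
  // and odd-indexed bytes in the second half; the loop below interleaves
  // the two halves back into their original order.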
{ const char *t1 = reinterpret_cast<const char *>(&tmpBuf.at(0)); const char *t2 = reinterpret_cast<const char *>(&tmpBuf.at(0)) + (uncompressed_size + 1) / 2; char *s = reinterpret_cast<char *>(dst); char *stop = s + uncompressed_size; for (;;) { if (s < stop) *(s++) = *(t1++); else break; if (s < stop) *(s++) = *(t2++); else break; } } return true; } #if TINYEXR_USE_PIZ #ifdef __clang__ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wc++11-long-long" #pragma clang diagnostic ignored "-Wold-style-cast" #pragma clang diagnostic ignored "-Wpadded" #pragma clang diagnostic ignored "-Wsign-conversion" #pragma clang diagnostic ignored "-Wc++11-extensions" #pragma clang diagnostic ignored "-Wconversion" #pragma clang diagnostic ignored "-Wc++98-compat-pedantic" #if __has_warning("-Wcast-qual") #pragma clang diagnostic ignored "-Wcast-qual" #endif #if __has_warning("-Wextra-semi-stmt") #pragma clang diagnostic ignored "-Wextra-semi-stmt" #endif #endif // // PIZ compress/uncompress, based on OpenEXR's ImfPizCompressor.cpp // // ----------------------------------------------------------------- // Copyright (c) 2004, Industrial Light & Magic, a division of Lucas // Digital Ltd. LLC) // (3 clause BSD license) // struct PIZChannelData { unsigned short *start; unsigned short *end; int nx; int ny; int ys; int size; }; //----------------------------------------------------------------------------- // // 16-bit Haar Wavelet encoding and decoding // // The source code in this file is derived from the encoding // and decoding routines written by Christian Rouet for his // PIZ image file format. // //----------------------------------------------------------------------------- // // Wavelet basis functions without modulo arithmetic; they produce // the best compression ratios when the wavelet-transformed data are // Huffman-encoded, but the wavelet transform works only for 14-bit // data (untransformed data values must be less than (1 << 14)). // inline void wenc14(unsigned short a, unsigned short b, unsigned short &l, unsigned short &h) { short as = static_cast<short>(a); short bs = static_cast<short>(b); short ms = (as + bs) >> 1; short ds = as - bs; l = static_cast<unsigned short>(ms); h = static_cast<unsigned short>(ds); } inline void wdec14(unsigned short l, unsigned short h, unsigned short &a, unsigned short &b) { short ls = static_cast<short>(l); short hs = static_cast<short>(h); int hi = hs; int ai = ls + (hi & 1) + (hi >> 1); short as = static_cast<short>(ai); short bs = static_cast<short>(ai - hi); a = static_cast<unsigned short>(as); b = static_cast<unsigned short>(bs); } // // Wavelet basis functions with modulo arithmetic; they work with full // 16-bit data, but Huffman-encoding the wavelet-transformed data doesn't // compress the data quite as well. 
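// For any 16-bit pair (a, b), wenc16(a, b, l, h) followed by
// wdec16(l, h, a2, b2) reproduces (a2, b2) == (a, b): every sum and
// difference below is reduced modulo 2^16 via MOD_MASK, so no information
// is lost even when the intermediate values overflow.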
// const int NBITS = 16; const int A_OFFSET = 1 << (NBITS - 1); const int M_OFFSET = 1 << (NBITS - 1); const int MOD_MASK = (1 << NBITS) - 1; inline void wenc16(unsigned short a, unsigned short b, unsigned short &l, unsigned short &h) { int ao = (a + A_OFFSET) & MOD_MASK; int m = ((ao + b) >> 1); int d = ao - b; if (d < 0) m = (m + M_OFFSET) & MOD_MASK; d &= MOD_MASK; l = static_cast<unsigned short>(m); h = static_cast<unsigned short>(d); } inline void wdec16(unsigned short l, unsigned short h, unsigned short &a, unsigned short &b) { int m = l; int d = h; int bb = (m - (d >> 1)) & MOD_MASK; int aa = (d + bb - A_OFFSET) & MOD_MASK; b = static_cast<unsigned short>(bb); a = static_cast<unsigned short>(aa); } // // 2D Wavelet encoding: // static void wav2Encode( unsigned short *in, // io: values are transformed in place int nx, // i : x size int ox, // i : x offset int ny, // i : y size int oy, // i : y offset unsigned short mx) // i : maximum in[x][y] value { bool w14 = (mx < (1 << 14)); int n = (nx > ny) ? ny : nx; int p = 1; // == 1 << level int p2 = 2; // == 1 << (level+1) // // Hierachical loop on smaller dimension n // while (p2 <= n) { unsigned short *py = in; unsigned short *ey = in + oy * (ny - p2); int oy1 = oy * p; int oy2 = oy * p2; int ox1 = ox * p; int ox2 = ox * p2; unsigned short i00, i01, i10, i11; // // Y loop // for (; py <= ey; py += oy2) { unsigned short *px = py; unsigned short *ex = py + ox * (nx - p2); // // X loop // for (; px <= ex; px += ox2) { unsigned short *p01 = px + ox1; unsigned short *p10 = px + oy1; unsigned short *p11 = p10 + ox1; // // 2D wavelet encoding // if (w14) { wenc14(*px, *p01, i00, i01); wenc14(*p10, *p11, i10, i11); wenc14(i00, i10, *px, *p10); wenc14(i01, i11, *p01, *p11); } else { wenc16(*px, *p01, i00, i01); wenc16(*p10, *p11, i10, i11); wenc16(i00, i10, *px, *p10); wenc16(i01, i11, *p01, *p11); } } // // Encode (1D) odd column (still in Y loop) // if (nx & p) { unsigned short *p10 = px + oy1; if (w14) wenc14(*px, *p10, i00, *p10); else wenc16(*px, *p10, i00, *p10); *px = i00; } } // // Encode (1D) odd line (must loop in X) // if (ny & p) { unsigned short *px = py; unsigned short *ex = py + ox * (nx - p2); for (; px <= ex; px += ox2) { unsigned short *p01 = px + ox1; if (w14) wenc14(*px, *p01, i00, *p01); else wenc16(*px, *p01, i00, *p01); *px = i00; } } // // Next level // p = p2; p2 <<= 1; } } // // 2D Wavelet decoding: // static void wav2Decode( unsigned short *in, // io: values are transformed in place int nx, // i : x size int ox, // i : x offset int ny, // i : y size int oy, // i : y offset unsigned short mx) // i : maximum in[x][y] value { bool w14 = (mx < (1 << 14)); int n = (nx > ny) ? 
ny : nx; int p = 1; int p2; // // Search max level // while (p <= n) p <<= 1; p >>= 1; p2 = p; p >>= 1; // // Hierarchical loop on smaller dimension n // while (p >= 1) { unsigned short *py = in; unsigned short *ey = in + oy * (ny - p2); int oy1 = oy * p; int oy2 = oy * p2; int ox1 = ox * p; int ox2 = ox * p2; unsigned short i00, i01, i10, i11; // // Y loop // for (; py <= ey; py += oy2) { unsigned short *px = py; unsigned short *ex = py + ox * (nx - p2); // // X loop // for (; px <= ex; px += ox2) { unsigned short *p01 = px + ox1; unsigned short *p10 = px + oy1; unsigned short *p11 = p10 + ox1; // // 2D wavelet decoding // if (w14) { wdec14(*px, *p10, i00, i10); wdec14(*p01, *p11, i01, i11); wdec14(i00, i01, *px, *p01); wdec14(i10, i11, *p10, *p11); } else { wdec16(*px, *p10, i00, i10); wdec16(*p01, *p11, i01, i11); wdec16(i00, i01, *px, *p01); wdec16(i10, i11, *p10, *p11); } } // // Decode (1D) odd column (still in Y loop) // if (nx & p) { unsigned short *p10 = px + oy1; if (w14) wdec14(*px, *p10, i00, *p10); else wdec16(*px, *p10, i00, *p10); *px = i00; } } // // Decode (1D) odd line (must loop in X) // if (ny & p) { unsigned short *px = py; unsigned short *ex = py + ox * (nx - p2); for (; px <= ex; px += ox2) { unsigned short *p01 = px + ox1; if (w14) wdec14(*px, *p01, i00, *p01); else wdec16(*px, *p01, i00, *p01); *px = i00; } } // // Next level // p2 = p; p >>= 1; } } //----------------------------------------------------------------------------- // // 16-bit Huffman compression and decompression. // // The source code in this file is derived from the 8-bit // Huffman compression and decompression routines written // by Christian Rouet for his PIZ image file format. // //----------------------------------------------------------------------------- // Adds some modification for tinyexr. 
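// The bit-level I/O below shares one convention: `c` accumulates bits in
// its low `lc` positions, flushing whole bytes as they fill up. A sketch
// of how outputBits()/getBits() pair up (illustrative only, kept out of
// the build; ExampleBitIo is not part of tinyexr):
#if 0
static void ExampleBitIo(void) {
  char buf[8] = {0};
  char *out = buf;
  long long c = 0;
  int lc = 0;
  outputBits(6, 0x2A, c, lc, out);  // queue 6 bits; nothing flushed yet
  outputBits(8, 0x55, c, lc, out);  // 14 bits pending; one byte flushes
  if (lc) *out++ = static_cast<char>(c << (8 - lc));  // flush remainder
  const char *in = buf;
  c = 0;
  lc = 0;
  long long v = getBits(6, c, lc, in);  // reads back 0x2A
  (void)v;
}
#endif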
const int HUF_ENCBITS = 16; // literal (value) bit length const int HUF_DECBITS = 14; // decoding bit size (>= 8) const int HUF_ENCSIZE = (1 << HUF_ENCBITS) + 1; // encoding table size const int HUF_DECSIZE = 1 << HUF_DECBITS; // decoding table size const int HUF_DECMASK = HUF_DECSIZE - 1; struct HufDec { // short code long code //------------------------------- int len : 8; // code length 0 int lit : 24; // lit p size int *p; // 0 lits }; inline long long hufLength(long long code) { return code & 63; } inline long long hufCode(long long code) { return code >> 6; } inline void outputBits(int nBits, long long bits, long long &c, int &lc, char *&out) { c <<= nBits; lc += nBits; c |= bits; while (lc >= 8) *out++ = static_cast<char>((c >> (lc -= 8))); } inline long long getBits(int nBits, long long &c, int &lc, const char *&in) { while (lc < nBits) { c = (c << 8) | *(reinterpret_cast<const unsigned char *>(in++)); lc += 8; } lc -= nBits; return (c >> lc) & ((1 << nBits) - 1); } // // ENCODING TABLE BUILDING & (UN)PACKING // // // Build a "canonical" Huffman code table: // - for each (uncompressed) symbol, hcode contains the length // of the corresponding code (in the compressed data) // - canonical codes are computed and stored in hcode // - the rules for constructing canonical codes are as follows: // * shorter codes (if filled with zeroes to the right) // have a numerically higher value than longer codes // * for codes with the same length, numerical values // increase with numerical symbol values // - because the canonical code table can be constructed from // symbol lengths alone, the code table can be transmitted // without sending the actual code values // - see http://www.compressconsult.com/huffman/ // static void hufCanonicalCodeTable(long long hcode[HUF_ENCSIZE]) { long long n[59]; // // For each i from 0 through 58, count the // number of different codes of length i, and // store the count in n[i]. // for (int i = 0; i <= 58; ++i) n[i] = 0; for (int i = 0; i < HUF_ENCSIZE; ++i) n[hcode[i]] += 1; // // For each i from 58 through 1, compute the // numerically lowest code with length i, and // store that code in n[i]. // long long c = 0; for (int i = 58; i > 0; --i) { long long nc = ((c + n[i]) >> 1); n[i] = c; c = nc; } // // hcode[i] contains the length, l, of the // code for symbol i. Assign the next available // code of length l to the symbol and store both // l and the code in hcode[i]. // for (int i = 0; i < HUF_ENCSIZE; ++i) { int l = static_cast<int>(hcode[i]); if (l > 0) hcode[i] = l | (n[l]++ << 6); } } // // Compute Huffman codes (based on frq input) and store them in frq: // - code structure is : [63:lsb - 6:msb] | [5-0: bit length]; // - max code length is 58 bits; // - codes outside the range [im-iM] have a null length (unused values); // - original frequencies are destroyed; // - encoding tables are used by hufEncode() and hufBuildDecTable(); // struct FHeapCompare { bool operator()(long long *a, long long *b) { return *a > *b; } }; static void hufBuildEncTable( long long *frq, // io: input frequencies [HUF_ENCSIZE], output table int *im, // o: min frq index int *iM) // o: max frq index { // // This function assumes that when it is called, array frq // indicates the frequency of all possible symbols in the data // that are to be Huffman-encoded. (frq[i] contains the number // of occurrences of symbol i in the data.) 
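// (frq has HUF_ENCSIZE == 65537 slots: one per 16-bit symbol value plus
// one spare slot that will hold the run-length pseudo-symbol appended
// further below.)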
// // The loop below does three things: // // 1) Finds the minimum and maximum indices that point // to non-zero entries in frq: // // frq[im] != 0, and frq[i] == 0 for all i < im // frq[iM] != 0, and frq[i] == 0 for all i > iM // // 2) Fills array fHeap with pointers to all non-zero // entries in frq. // // 3) Initializes array hlink such that hlink[i] == i // for all array entries. // std::vector<int> hlink(HUF_ENCSIZE); std::vector<long long *> fHeap(HUF_ENCSIZE); *im = 0; while (!frq[*im]) (*im)++; int nf = 0; for (int i = *im; i < HUF_ENCSIZE; i++) { hlink[i] = i; if (frq[i]) { fHeap[nf] = &frq[i]; nf++; *iM = i; } } // // Add a pseudo-symbol, with a frequency count of 1, to frq; // adjust the fHeap and hlink array accordingly. Function // hufEncode() uses the pseudo-symbol for run-length encoding. // (*iM)++; frq[*iM] = 1; fHeap[nf] = &frq[*iM]; nf++; // // Build an array, scode, such that scode[i] contains the number // of bits assigned to symbol i. Conceptually this is done by // constructing a tree whose leaves are the symbols with non-zero // frequency: // // Make a heap that contains all symbols with a non-zero frequency, // with the least frequent symbol on top. // // Repeat until only one symbol is left on the heap: // // Take the two least frequent symbols off the top of the heap. // Create a new node that has first two nodes as children, and // whose frequency is the sum of the frequencies of the first // two nodes. Put the new node back into the heap. // // The last node left on the heap is the root of the tree. For each // leaf node, the distance between the root and the leaf is the length // of the code for the corresponding symbol. // // The loop below doesn't actually build the tree; instead we compute // the distances of the leaves from the root on the fly. When a new // node is added to the heap, then that node's descendants are linked // into a single linear list that starts at the new node, and the code // lengths of the descendants (that is, their distance from the root // of the tree) are incremented by one. // std::make_heap(&fHeap[0], &fHeap[nf], FHeapCompare()); std::vector<long long> scode(HUF_ENCSIZE); memset(scode.data(), 0, sizeof(long long) * HUF_ENCSIZE); while (nf > 1) { // // Find the indices, mm and m, of the two smallest non-zero frq // values in fHeap, add the smallest frq to the second-smallest // frq, and remove the smallest frq value from fHeap. // int mm = fHeap[0] - frq; std::pop_heap(&fHeap[0], &fHeap[nf], FHeapCompare()); --nf; int m = fHeap[0] - frq; std::pop_heap(&fHeap[0], &fHeap[nf], FHeapCompare()); frq[m] += frq[mm]; std::push_heap(&fHeap[0], &fHeap[nf], FHeapCompare()); // // The entries in scode are linked into lists with the // entries in hlink serving as "next" pointers and with // the end of a list marked by hlink[j] == j. // // Traverse the lists that start at scode[m] and scode[mm]. // For each element visited, increment the length of the // corresponding code by one bit. (If we visit scode[j] // during the traversal, then the code for symbol j becomes // one bit longer.) // // Merge the lists that start at scode[m] and scode[mm] // into a single list that starts at scode[m]. // // // Add a bit to all codes in the first list. // for (int j = m;; j = hlink[j]) { scode[j]++; assert(scode[j] <= 58); if (hlink[j] == j) { // // Merge the two lists. 
// hlink[j] = mm; break; } } // // Add a bit to all codes in the second list // for (int j = mm;; j = hlink[j]) { scode[j]++; assert(scode[j] <= 58); if (hlink[j] == j) break; } } // // Build a canonical Huffman code table, replacing the code // lengths in scode with (code, code length) pairs. Copy the // code table from scode into frq. // hufCanonicalCodeTable(scode.data()); memcpy(frq, scode.data(), sizeof(long long) * HUF_ENCSIZE); } // // Pack an encoding table: // - only code lengths, not actual codes, are stored // - runs of zeroes are compressed as follows: // // unpacked packed // -------------------------------- // 1 zero 0 (6 bits) // 2 zeroes 59 // 3 zeroes 60 // 4 zeroes 61 // 5 zeroes 62 // n zeroes (6 or more) 63 n-6 (6 + 8 bits) // const int SHORT_ZEROCODE_RUN = 59; const int LONG_ZEROCODE_RUN = 63; const int SHORTEST_LONG_RUN = 2 + LONG_ZEROCODE_RUN - SHORT_ZEROCODE_RUN; const int LONGEST_LONG_RUN = 255 + SHORTEST_LONG_RUN; static void hufPackEncTable( const long long *hcode, // i : encoding table [HUF_ENCSIZE] int im, // i : min hcode index int iM, // i : max hcode index char **pcode) // o: ptr to packed table (updated) { char *p = *pcode; long long c = 0; int lc = 0; for (; im <= iM; im++) { int l = hufLength(hcode[im]); if (l == 0) { int zerun = 1; while ((im < iM) && (zerun < LONGEST_LONG_RUN)) { if (hufLength(hcode[im + 1]) > 0) break; im++; zerun++; } if (zerun >= 2) { if (zerun >= SHORTEST_LONG_RUN) { outputBits(6, LONG_ZEROCODE_RUN, c, lc, p); outputBits(8, zerun - SHORTEST_LONG_RUN, c, lc, p); } else { outputBits(6, SHORT_ZEROCODE_RUN + zerun - 2, c, lc, p); } continue; } } outputBits(6, l, c, lc, p); } if (lc > 0) *p++ = (unsigned char)(c << (8 - lc)); *pcode = p; } // // Unpack an encoding table packed by hufPackEncTable(): // static bool hufUnpackEncTable( const char **pcode, // io: ptr to packed table (updated) int ni, // i : input size (in bytes) int im, // i : min hcode index int iM, // i : max hcode index long long *hcode) // o: encoding table [HUF_ENCSIZE] { memset(hcode, 0, sizeof(long long) * HUF_ENCSIZE); const char *p = *pcode; long long c = 0; int lc = 0; for (; im <= iM; im++) { if (p - *pcode >= ni) { return false; } long long l = hcode[im] = getBits(6, c, lc, p); // code length if (l == (long long)LONG_ZEROCODE_RUN) { if (p - *pcode > ni) { return false; } int zerun = getBits(8, c, lc, p) + SHORTEST_LONG_RUN; if (im + zerun > iM + 1) { return false; } while (zerun--) hcode[im++] = 0; im--; } else if (l >= (long long)SHORT_ZEROCODE_RUN) { int zerun = l - SHORT_ZEROCODE_RUN + 2; if (im + zerun > iM + 1) { return false; } while (zerun--) hcode[im++] = 0; im--; } } *pcode = const_cast<char *>(p); hufCanonicalCodeTable(hcode); return true; } // // DECODING TABLE BUILDING // // // Clear a newly allocated decoding table so that it contains only zeroes. 
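// (hufBuildDecTable() treats entries with len == 0 and p == NULL as
// vacant, so every table must be cleared before it is built.)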
//

static void hufClearDecTable(HufDec *hdecod)  // io: (allocated by caller)
                                              //     decoding table [HUF_DECSIZE]
{
  for (int i = 0; i < HUF_DECSIZE; i++) {
    hdecod[i].len = 0;
    hdecod[i].lit = 0;
    hdecod[i].p = NULL;
  }
  // memset(hdecod, 0, sizeof(HufDec) * HUF_DECSIZE);
}

//
// Build a decoding hash table based on the encoding table hcode:
// - short codes (<= HUF_DECBITS) are resolved with a single table access;
// - long code entry allocations are not optimized, because long codes are
//   infrequent;
// - decoding tables are used by hufDecode();
//

static bool hufBuildDecTable(const long long *hcode,  // i : encoding table
                             int im,                  // i : min index in hcode
                             int iM,                  // i : max index in hcode
                             HufDec *hdecod)  // o: (allocated by caller)
                                              //    decoding table [HUF_DECSIZE]
{
  //
  // Init hashtable & loop on all codes.
  // Assumes that hufClearDecTable(hdecod) has already been called.
  //

  for (; im <= iM; im++) {
    long long c = hufCode(hcode[im]);
    int l = hufLength(hcode[im]);

    if (c >> l) {
      //
      // Error: c is supposed to be an l-bit code,
      // but c contains a value that is greater
      // than the largest l-bit number.
      //

      // invalidTableEntry();
      return false;
    }

    if (l > HUF_DECBITS) {
      //
      // Long code: add a secondary entry
      //

      HufDec *pl = hdecod + (c >> (l - HUF_DECBITS));

      if (pl->len) {
        //
        // Error: a short code has already
        // been stored in table entry *pl.
        //

        // invalidTableEntry();
        return false;
      }

      pl->lit++;

      if (pl->p) {
        int *p = pl->p;
        pl->p = new int[pl->lit];

        for (int i = 0; i < pl->lit - 1; ++i) pl->p[i] = p[i];

        delete[] p;
      } else {
        pl->p = new int[1];
      }

      pl->p[pl->lit - 1] = im;
    } else if (l) {
      //
      // Short code: init all primary entries
      //

      HufDec *pl = hdecod + (c << (HUF_DECBITS - l));

      for (long long i = 1ULL << (HUF_DECBITS - l); i > 0; i--, pl++) {
        if (pl->len || pl->p) {
          //
          // Error: a short code or a long code has
          // already been stored in table entry *pl.
          //

          // invalidTableEntry();
          return false;
        }

        pl->len = l;
        pl->lit = im;
      }
    }
  }

  return true;
}

//
// Free the long code entries of a decoding table built by hufBuildDecTable()
//

static void hufFreeDecTable(HufDec *hdecod)  // io: Decoding table
{
  for (int i = 0; i < HUF_DECSIZE; i++) {
    if (hdecod[i].p) {
      delete[] hdecod[i].p;
      hdecod[i].p = 0;
    }
  }
}

//
// ENCODING
//

inline void outputCode(long long code, long long &c, int &lc, char *&out) {
  outputBits(hufLength(code), hufCode(code), c, lc, out);
}

inline void sendCode(long long sCode, int runCount, long long runCode,
                     long long &c, int &lc, char *&out) {
  //
  // Output a run of runCount instances of the symbol sCode.
  // Output the symbols explicitly, or if that is shorter, output
  // the sCode symbol once followed by a runCode symbol and runCount
  // expressed as an 8-bit number.
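  // For example, with a 10-bit code for sCode and a 6-bit code for
  // runCode, a run of runCount == 4 repeats costs 10 + 6 + 8 = 24 bits in
  // run form, versus the 10 * 4 = 40 bits the explicit form would spend
  // on the repeats, so the test below chooses the run form.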
// if (hufLength(sCode) + hufLength(runCode) + 8 < hufLength(sCode) * runCount) { outputCode(sCode, c, lc, out); outputCode(runCode, c, lc, out); outputBits(8, runCount, c, lc, out); } else { while (runCount-- >= 0) outputCode(sCode, c, lc, out); } } // // Encode (compress) ni values based on the Huffman encoding table hcode: // static int hufEncode // return: output size (in bits) (const long long *hcode, // i : encoding table const unsigned short *in, // i : uncompressed input buffer const int ni, // i : input buffer size (in bytes) int rlc, // i : rl code char *out) // o: compressed output buffer { char *outStart = out; long long c = 0; // bits not yet written to out int lc = 0; // number of valid bits in c (LSB) int s = in[0]; int cs = 0; // // Loop on input values // for (int i = 1; i < ni; i++) { // // Count same values or send code // if (s == in[i] && cs < 255) { cs++; } else { sendCode(hcode[s], cs, hcode[rlc], c, lc, out); cs = 0; } s = in[i]; } // // Send remaining code // sendCode(hcode[s], cs, hcode[rlc], c, lc, out); if (lc) *out = (c << (8 - lc)) & 0xff; return (out - outStart) * 8 + lc; } // // DECODING // // // In order to force the compiler to inline them, // getChar() and getCode() are implemented as macros // instead of "inline" functions. // #define getChar(c, lc, in) \ { \ c = (c << 8) | *(unsigned char *)(in++); \ lc += 8; \ } #if 0 #define getCode(po, rlc, c, lc, in, out, ob, oe) \ { \ if (po == rlc) { \ if (lc < 8) getChar(c, lc, in); \ \ lc -= 8; \ \ unsigned char cs = (c >> lc); \ \ if (out + cs > oe) return false; \ \ /* TinyEXR issue 78 */ \ unsigned short s = out[-1]; \ \ while (cs-- > 0) *out++ = s; \ } else if (out < oe) { \ *out++ = po; \ } else { \ return false; \ } \ } #else static bool getCode(int po, int rlc, long long &c, int &lc, const char *&in, const char *in_end, unsigned short *&out, const unsigned short *ob, const unsigned short *oe) { (void)ob; if (po == rlc) { if (lc < 8) { /* TinyEXR issue 78 */ if ((in + 1) >= in_end) { return false; } getChar(c, lc, in); } lc -= 8; unsigned char cs = (c >> lc); if (out + cs > oe) return false; // Bounds check for safety // Issue 100. 
if ((out - 1) < ob) return false; unsigned short s = out[-1]; while (cs-- > 0) *out++ = s; } else if (out < oe) { *out++ = po; } else { return false; } return true; } #endif // // Decode (uncompress) ni bits based on encoding & decoding tables: // static bool hufDecode(const long long *hcode, // i : encoding table const HufDec *hdecod, // i : decoding table const char *in, // i : compressed input buffer int ni, // i : input size (in bits) int rlc, // i : run-length code int no, // i : expected output size (in bytes) unsigned short *out) // o: uncompressed output buffer { long long c = 0; int lc = 0; unsigned short *outb = out; // begin unsigned short *oe = out + no; // end const char *ie = in + (ni + 7) / 8; // input byte size // // Loop on input bytes // while (in < ie) { getChar(c, lc, in); // // Access decoding table // while (lc >= HUF_DECBITS) { const HufDec pl = hdecod[(c >> (lc - HUF_DECBITS)) & HUF_DECMASK]; if (pl.len) { // // Get short code // lc -= pl.len; // std::cout << "lit = " << pl.lit << std::endl; // std::cout << "rlc = " << rlc << std::endl; // std::cout << "c = " << c << std::endl; // std::cout << "lc = " << lc << std::endl; // std::cout << "in = " << in << std::endl; // std::cout << "out = " << out << std::endl; // std::cout << "oe = " << oe << std::endl; if (!getCode(pl.lit, rlc, c, lc, in, ie, out, outb, oe)) { return false; } } else { if (!pl.p) { return false; } // invalidCode(); // wrong code // // Search long code // int j; for (j = 0; j < pl.lit; j++) { int l = hufLength(hcode[pl.p[j]]); while (lc < l && in < ie) // get more bits getChar(c, lc, in); if (lc >= l) { if (hufCode(hcode[pl.p[j]]) == ((c >> (lc - l)) & (((long long)(1) << l) - 1))) { // // Found : get long code // lc -= l; if (!getCode(pl.p[j], rlc, c, lc, in, ie, out, outb, oe)) { return false; } break; } } } if (j == pl.lit) { return false; // invalidCode(); // Not found } } } } // // Get remaining (short) codes // int i = (8 - ni) & 7; c >>= i; lc -= i; while (lc > 0) { const HufDec pl = hdecod[(c << (HUF_DECBITS - lc)) & HUF_DECMASK]; if (pl.len) { lc -= pl.len; if (!getCode(pl.lit, rlc, c, lc, in, ie, out, outb, oe)) { return false; } } else { return false; // invalidCode(); // wrong (long) code } } if (out - outb != no) { return false; } // notEnoughData (); return true; } static void countFrequencies(std::vector<long long> &freq, const unsigned short data[/*n*/], int n) { for (int i = 0; i < HUF_ENCSIZE; ++i) freq[i] = 0; for (int i = 0; i < n; ++i) ++freq[data[i]]; } static void writeUInt(char buf[4], unsigned int i) { unsigned char *b = (unsigned char *)buf; b[0] = i; b[1] = i >> 8; b[2] = i >> 16; b[3] = i >> 24; } static unsigned int readUInt(const char buf[4]) { const unsigned char *b = (const unsigned char *)buf; return (b[0] & 0x000000ff) | ((b[1] << 8) & 0x0000ff00) | ((b[2] << 16) & 0x00ff0000) | ((b[3] << 24) & 0xff000000); } // // EXTERNAL INTERFACE // static int hufCompress(const unsigned short raw[], int nRaw, char compressed[]) { if (nRaw == 0) return 0; std::vector<long long> freq(HUF_ENCSIZE); countFrequencies(freq, raw, nRaw); int im = 0; int iM = 0; hufBuildEncTable(freq.data(), &im, &iM); char *tableStart = compressed + 20; char *tableEnd = tableStart; hufPackEncTable(freq.data(), im, iM, &tableEnd); int tableLength = tableEnd - tableStart; char *dataStart = tableEnd; int nBits = hufEncode(freq.data(), raw, nRaw, iM, dataStart); int data_length = (nBits + 7) / 8; writeUInt(compressed, im); writeUInt(compressed + 4, iM); writeUInt(compressed + 8, tableLength); 
  writeUInt(compressed + 12, nBits);
  writeUInt(compressed + 16, 0);  // room for future extensions
  return dataStart + data_length - compressed;
}

static bool hufUncompress(const char compressed[], int nCompressed,
                          std::vector<unsigned short> *raw) {
  if (nCompressed == 0) {
    // An empty chunk never decodes to valid output.
    return false;
  }

  int im = readUInt(compressed);
  int iM = readUInt(compressed + 4);
  // int tableLength = readUInt (compressed + 8);
  int nBits = readUInt(compressed + 12);

  if (im < 0 || im >= HUF_ENCSIZE || iM < 0 || iM >= HUF_ENCSIZE) return false;

  const char *ptr = compressed + 20;

  //
  // Fast decoder needs at least 2x64-bits of compressed data, and
  // needs to be runnable on this platform. Otherwise, fall back
  // to the original decoder
  //

  // if (FastHufDecoder::enabled() && nBits > 128)
  //{
  //  FastHufDecoder fhd (ptr, nCompressed - (ptr - compressed), im, iM, iM);
  //  fhd.decode ((unsigned char*)ptr, nBits, raw, nRaw);
  //}
  // else
  {
    std::vector<long long> freq(HUF_ENCSIZE);
    std::vector<HufDec> hdec(HUF_DECSIZE);

    hufClearDecTable(&hdec.at(0));

    // Check the return codes below so that corrupt input fails cleanly
    // instead of being decoded into garbage.
    if (!hufUnpackEncTable(&ptr, nCompressed - (ptr - compressed), im, iM,
                           &freq.at(0))) {
      return false;
    }

    {
      if (nBits > 8 * (nCompressed - (ptr - compressed))) {
        return false;
      }

      if (!hufBuildDecTable(&freq.at(0), im, iM, &hdec.at(0))) {
        hufFreeDecTable(&hdec.at(0));
        return false;
      }

      if (!hufDecode(&freq.at(0), &hdec.at(0), ptr, nBits, iM, raw->size(),
                     raw->data())) {
        hufFreeDecTable(&hdec.at(0));
        return false;
      }
    }
    // catch (...)
    //{
    //  hufFreeDecTable (hdec);
    //  throw;
    //}

    hufFreeDecTable(&hdec.at(0));
  }

  return true;
}

//
// Functions to compress the range of values in the pixel data
//

const int USHORT_RANGE = (1 << 16);
const int BITMAP_SIZE = (USHORT_RANGE >> 3);

static void bitmapFromData(const unsigned short data[/*nData*/], int nData,
                           unsigned char bitmap[BITMAP_SIZE],
                           unsigned short &minNonZero,
                           unsigned short &maxNonZero) {
  for (int i = 0; i < BITMAP_SIZE; ++i) bitmap[i] = 0;

  for (int i = 0; i < nData; ++i) bitmap[data[i] >> 3] |= (1 << (data[i] & 7));

  bitmap[0] &= ~1;  // zero is not explicitly stored in
                    // the bitmap; we assume that the
                    // data always contain zeroes

  minNonZero = BITMAP_SIZE - 1;
  maxNonZero = 0;

  for (int i = 0; i < BITMAP_SIZE; ++i) {
    if (bitmap[i]) {
      if (minNonZero > i) minNonZero = i;
      if (maxNonZero < i) maxNonZero = i;
    }
  }
}

static unsigned short forwardLutFromBitmap(
    const unsigned char bitmap[BITMAP_SIZE], unsigned short lut[USHORT_RANGE]) {
  int k = 0;

  for (int i = 0; i < USHORT_RANGE; ++i) {
    if ((i == 0) || (bitmap[i >> 3] & (1 << (i & 7))))
      lut[i] = k++;
    else
      lut[i] = 0;
  }

  return k - 1;  // maximum value stored in lut[],
}                // i.e. number of ones in bitmap minus 1

static unsigned short reverseLutFromBitmap(
    const unsigned char bitmap[BITMAP_SIZE], unsigned short lut[USHORT_RANGE]) {
  int k = 0;

  for (int i = 0; i < USHORT_RANGE; ++i) {
    if ((i == 0) || (bitmap[i >> 3] & (1 << (i & 7)))) lut[k++] = i;
  }

  int n = k - 1;

  while (k < USHORT_RANGE) lut[k++] = 0;

  return n;  // maximum k where lut[k] is non-zero,
}            // i.e.
number of ones in bitmap minus 1 static void applyLut(const unsigned short lut[USHORT_RANGE], unsigned short data[/*nData*/], int nData) { for (int i = 0; i < nData; ++i) data[i] = lut[data[i]]; } #ifdef __clang__ #pragma clang diagnostic pop #endif // __clang__ #ifdef _MSC_VER #pragma warning(pop) #endif static bool CompressPiz(unsigned char *outPtr, unsigned int *outSize, const unsigned char *inPtr, size_t inSize, const std::vector<ChannelInfo> &channelInfo, int data_width, int num_lines) { std::vector<unsigned char> bitmap(BITMAP_SIZE); unsigned short minNonZero; unsigned short maxNonZero; #if !MINIZ_LITTLE_ENDIAN // @todo { PIZ compression on BigEndian architecture. } assert(0); return false; #endif // Assume `inSize` is multiple of 2 or 4. std::vector<unsigned short> tmpBuffer(inSize / sizeof(unsigned short)); std::vector<PIZChannelData> channelData(channelInfo.size()); unsigned short *tmpBufferEnd = &tmpBuffer.at(0); for (size_t c = 0; c < channelData.size(); c++) { PIZChannelData &cd = channelData[c]; cd.start = tmpBufferEnd; cd.end = cd.start; cd.nx = data_width; cd.ny = num_lines; // cd.ys = c.channel().ySampling; size_t pixelSize = sizeof(int); // UINT and FLOAT if (channelInfo[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { pixelSize = sizeof(short); } cd.size = static_cast<int>(pixelSize / sizeof(short)); tmpBufferEnd += cd.nx * cd.ny * cd.size; } const unsigned char *ptr = inPtr; for (int y = 0; y < num_lines; ++y) { for (size_t i = 0; i < channelData.size(); ++i) { PIZChannelData &cd = channelData[i]; // if (modp (y, cd.ys) != 0) // continue; size_t n = static_cast<size_t>(cd.nx * cd.size); memcpy(cd.end, ptr, n * sizeof(unsigned short)); ptr += n * sizeof(unsigned short); cd.end += n; } } bitmapFromData(&tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()), bitmap.data(), minNonZero, maxNonZero); std::vector<unsigned short> lut(USHORT_RANGE); unsigned short maxValue = forwardLutFromBitmap(bitmap.data(), lut.data()); applyLut(lut.data(), &tmpBuffer.at(0), static_cast<int>(tmpBuffer.size())); // // Store range compression info in _outBuffer // char *buf = reinterpret_cast<char *>(outPtr); memcpy(buf, &minNonZero, sizeof(unsigned short)); buf += sizeof(unsigned short); memcpy(buf, &maxNonZero, sizeof(unsigned short)); buf += sizeof(unsigned short); if (minNonZero <= maxNonZero) { memcpy(buf, reinterpret_cast<char *>(&bitmap[0] + minNonZero), maxNonZero - minNonZero + 1); buf += maxNonZero - minNonZero + 1; } // // Apply wavelet encoding // for (size_t i = 0; i < channelData.size(); ++i) { PIZChannelData &cd = channelData[i]; for (int j = 0; j < cd.size; ++j) { wav2Encode(cd.start + j, cd.nx, cd.size, cd.ny, cd.nx * cd.size, maxValue); } } // // Apply Huffman encoding; append the result to _outBuffer // // length header(4byte), then huff data. Initialize length header with zero, // then later fill it by `length`. char *lengthPtr = buf; int zero = 0; memcpy(buf, &zero, sizeof(int)); buf += sizeof(int); int length = hufCompress(&tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()), buf); memcpy(lengthPtr, &length, sizeof(int)); (*outSize) = static_cast<unsigned int>( (reinterpret_cast<unsigned char *>(buf) - outPtr) + static_cast<unsigned int>(length)); // Use uncompressed data when compressed data is larger than uncompressed. 
// (Issue 40) if ((*outSize) >= inSize) { (*outSize) = static_cast<unsigned int>(inSize); memcpy(outPtr, inPtr, inSize); } return true; } static bool DecompressPiz(unsigned char *outPtr, const unsigned char *inPtr, size_t tmpBufSize, size_t inLen, int num_channels, const EXRChannelInfo *channels, int data_width, int num_lines) { if (inLen == tmpBufSize) { // Data is not compressed(Issue 40). memcpy(outPtr, inPtr, inLen); return true; } std::vector<unsigned char> bitmap(BITMAP_SIZE); unsigned short minNonZero; unsigned short maxNonZero; #if !MINIZ_LITTLE_ENDIAN // @todo { PIZ compression on BigEndian architecture. } assert(0); return false; #endif memset(bitmap.data(), 0, BITMAP_SIZE); const unsigned char *ptr = inPtr; // minNonZero = *(reinterpret_cast<const unsigned short *>(ptr)); tinyexr::cpy2(&minNonZero, reinterpret_cast<const unsigned short *>(ptr)); // maxNonZero = *(reinterpret_cast<const unsigned short *>(ptr + 2)); tinyexr::cpy2(&maxNonZero, reinterpret_cast<const unsigned short *>(ptr + 2)); ptr += 4; if (maxNonZero >= BITMAP_SIZE) { return false; } if (minNonZero <= maxNonZero) { memcpy(reinterpret_cast<char *>(&bitmap[0] + minNonZero), ptr, maxNonZero - minNonZero + 1); ptr += maxNonZero - minNonZero + 1; } std::vector<unsigned short> lut(USHORT_RANGE); memset(lut.data(), 0, sizeof(unsigned short) * USHORT_RANGE); unsigned short maxValue = reverseLutFromBitmap(bitmap.data(), lut.data()); // // Huffman decoding // int length; // length = *(reinterpret_cast<const int *>(ptr)); tinyexr::cpy4(&length, reinterpret_cast<const int *>(ptr)); ptr += sizeof(int); if (size_t((ptr - inPtr) + length) > inLen) { return false; } std::vector<unsigned short> tmpBuffer(tmpBufSize); hufUncompress(reinterpret_cast<const char *>(ptr), length, &tmpBuffer); // // Wavelet decoding // std::vector<PIZChannelData> channelData(static_cast<size_t>(num_channels)); unsigned short *tmpBufferEnd = &tmpBuffer.at(0); for (size_t i = 0; i < static_cast<size_t>(num_channels); ++i) { const EXRChannelInfo &chan = channels[i]; size_t pixelSize = sizeof(int); // UINT and FLOAT if (chan.pixel_type == TINYEXR_PIXELTYPE_HALF) { pixelSize = sizeof(short); } channelData[i].start = tmpBufferEnd; channelData[i].end = channelData[i].start; channelData[i].nx = data_width; channelData[i].ny = num_lines; // channelData[i].ys = 1; channelData[i].size = static_cast<int>(pixelSize / sizeof(short)); tmpBufferEnd += channelData[i].nx * channelData[i].ny * channelData[i].size; } for (size_t i = 0; i < channelData.size(); ++i) { PIZChannelData &cd = channelData[i]; for (int j = 0; j < cd.size; ++j) { wav2Decode(cd.start + j, cd.nx, cd.size, cd.ny, cd.nx * cd.size, maxValue); } } // // Expand the pixel data to their original range // applyLut(lut.data(), &tmpBuffer.at(0), static_cast<int>(tmpBufSize)); for (int y = 0; y < num_lines; y++) { for (size_t i = 0; i < channelData.size(); ++i) { PIZChannelData &cd = channelData[i]; // if (modp (y, cd.ys) != 0) // continue; size_t n = static_cast<size_t>(cd.nx * cd.size); memcpy(outPtr, cd.end, static_cast<size_t>(n * sizeof(unsigned short))); outPtr += n * sizeof(unsigned short); cd.end += n; } } return true; } #endif // TINYEXR_USE_PIZ #if TINYEXR_USE_ZFP struct ZFPCompressionParam { double rate; int precision; double tolerance; int type; // TINYEXR_ZFP_COMPRESSIONTYPE_* ZFPCompressionParam() { type = TINYEXR_ZFP_COMPRESSIONTYPE_RATE; rate = 2.0; precision = 0; tolerance = 0.0f; } }; bool FindZFPCompressionParam(ZFPCompressionParam *param, const EXRAttribute *attributes, int num_attributes) 
{
  bool foundType = false;

  for (int i = 0; i < num_attributes; i++) {
    if ((strcmp(attributes[i].name, "zfpCompressionType") == 0) &&
        (attributes[i].size == 1)) {
      param->type = static_cast<int>(attributes[i].value[0]);
      foundType = true;
    }
  }

  if (!foundType) {
    return false;
  }

  if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) {
    for (int i = 0; i < num_attributes; i++) {
      if ((strcmp(attributes[i].name, "zfpCompressionRate") == 0) &&
          (attributes[i].size == 8)) {
        param->rate = *(reinterpret_cast<double *>(attributes[i].value));
        return true;
      }
    }
  } else if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) {
    for (int i = 0; i < num_attributes; i++) {
      if ((strcmp(attributes[i].name, "zfpCompressionPrecision") == 0) &&
          (attributes[i].size == 4)) {
        // Store into `precision` (not `rate`); this is the field the
        // zfp_stream_set_precision() calls below actually consume.
        param->precision = *(reinterpret_cast<int *>(attributes[i].value));
        return true;
      }
    }
  } else if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) {
    for (int i = 0; i < num_attributes; i++) {
      if ((strcmp(attributes[i].name, "zfpCompressionTolerance") == 0) &&
          (attributes[i].size == 8)) {
        param->tolerance = *(reinterpret_cast<double *>(attributes[i].value));
        return true;
      }
    }
  } else {
    assert(0);
  }

  return false;
}

// Assume pixel format is FLOAT for all channels.
static bool DecompressZfp(float *dst, int dst_width, int dst_num_lines,
                          int num_channels, const unsigned char *src,
                          unsigned long src_size,
                          const ZFPCompressionParam &param) {
  // Raw size in bytes (all channels are FLOAT by assumption).
  size_t uncompressed_size = static_cast<size_t>(dst_width) *
                             static_cast<size_t>(dst_num_lines) *
                             static_cast<size_t>(num_channels) * sizeof(float);

  if (uncompressed_size == src_size) {
    // Data is not compressed(Issue 40).
    memcpy(dst, src, src_size);
    return true;  // done; don't run the ZFP decoder on raw data.
  }

  zfp_stream *zfp = NULL;
  zfp_field *field = NULL;

  assert((dst_width % 4) == 0);
  assert((dst_num_lines % 4) == 0);

  if ((dst_width & 3U) || (dst_num_lines & 3U)) {
    return false;
  }

  field =
      zfp_field_2d(reinterpret_cast<void *>(const_cast<unsigned char *>(src)),
                   zfp_type_float, dst_width, dst_num_lines * num_channels);
  zfp = zfp_stream_open(NULL);

  if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) {
    zfp_stream_set_rate(zfp, param.rate, zfp_type_float, /* dimension */ 2,
                        /* write random access */ 0);
  } else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) {
    zfp_stream_set_precision(zfp, param.precision, zfp_type_float);
  } else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) {
    zfp_stream_set_accuracy(zfp, param.tolerance, zfp_type_float);
  } else {
    assert(0);
  }

  size_t buf_size = zfp_stream_maximum_size(zfp, field);
  std::vector<unsigned char> buf(buf_size);
  memcpy(&buf.at(0), src, src_size);

  bitstream *stream = stream_open(&buf.at(0), buf_size);
  zfp_stream_set_bit_stream(zfp, stream);
  zfp_stream_rewind(zfp);

  size_t image_size = dst_width * dst_num_lines;

  for (int c = 0; c < num_channels; c++) {
    // decompress 4x4 pixel block.
    for (int y = 0; y < dst_num_lines; y += 4) {
      for (int x = 0; x < dst_width; x += 4) {
        float fblock[16];
        zfp_decode_block_float_2(zfp, fblock);
        for (int j = 0; j < 4; j++) {
          for (int i = 0; i < 4; i++) {
            dst[c * image_size + ((y + j) * dst_width + (x + i))] =
                fblock[j * 4 + i];
          }
        }
      }
    }
  }

  zfp_field_free(field);
  zfp_stream_close(zfp);
  stream_close(stream);

  return true;
}

// Assume pixel format is FLOAT for all channels.
bool CompressZfp(std::vector<unsigned char> *outBuf, unsigned int *outSize,
                 const float *inPtr, int width, int num_lines,
                 int num_channels, const ZFPCompressionParam &param) {
  zfp_stream *zfp = NULL;
  zfp_field *field = NULL;

  assert((width % 4) == 0);
  assert((num_lines % 4) == 0);

  if ((width & 3U) || (num_lines & 3U)) {
    return false;
  }

  // create input array.
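// Note: the "input array" below presents the whole image to ZFP as a single
// 2D field of width x (num_lines * num_channels) floats, i.e. channels are
// stacked vertically and compressed as one plane. ZFP codes fixed 4x4
// blocks, which is why width and num_lines must be multiples of 4 (checked
// above).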
field = zfp_field_2d(reinterpret_cast<void *>(const_cast<float *>(inPtr)), zfp_type_float, width, num_lines * num_channels); zfp = zfp_stream_open(NULL); if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) { zfp_stream_set_rate(zfp, param.rate, zfp_type_float, 2, 0); } else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) { zfp_stream_set_precision(zfp, param.precision, zfp_type_float); } else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) { zfp_stream_set_accuracy(zfp, param.tolerance, zfp_type_float); } else { assert(0); } size_t buf_size = zfp_stream_maximum_size(zfp, field); outBuf->resize(buf_size); bitstream *stream = stream_open(&outBuf->at(0), buf_size); zfp_stream_set_bit_stream(zfp, stream); zfp_field_free(field); size_t image_size = width * num_lines; for (int c = 0; c < num_channels; c++) { // compress 4x4 pixel block. for (int y = 0; y < num_lines; y += 4) { for (int x = 0; x < width; x += 4) { float fblock[16]; for (int j = 0; j < 4; j++) { for (int i = 0; i < 4; i++) { fblock[j * 4 + i] = inPtr[c * image_size + ((y + j) * width + (x + i))]; } } zfp_encode_block_float_2(zfp, fblock); } } } zfp_stream_flush(zfp); (*outSize) = zfp_stream_compressed_size(zfp); zfp_stream_close(zfp); return true; } #endif // // ----------------------------------------------------------------- // // TODO(syoyo): Refactor function arguments. static bool DecodePixelData(/* out */ unsigned char **out_images, const int *requested_pixel_types, const unsigned char *data_ptr, size_t data_len, int compression_type, int line_order, int width, int height, int x_stride, int y, int line_no, int num_lines, size_t pixel_data_size, size_t num_attributes, const EXRAttribute *attributes, size_t num_channels, const EXRChannelInfo *channels, const std::vector<size_t> &channel_offset_list) { if (compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { // PIZ #if TINYEXR_USE_PIZ if ((width == 0) || (num_lines == 0) || (pixel_data_size == 0)) { // Invalid input #90 return false; } // Allocate original data size. std::vector<unsigned char> outBuf(static_cast<size_t>( static_cast<size_t>(width * num_lines) * pixel_data_size)); size_t tmpBufLen = outBuf.size(); bool ret = tinyexr::DecompressPiz( reinterpret_cast<unsigned char *>(&outBuf.at(0)), data_ptr, tmpBufLen, data_len, static_cast<int>(num_channels), channels, width, num_lines); if (!ret) { return false; } // For PIZ_COMPRESSION: // pixel sample data for channel 0 for scanline 0 // pixel sample data for channel 1 for scanline 0 // pixel sample data for channel ... for scanline 0 // pixel sample data for channel n for scanline 0 // pixel sample data for channel 0 for scanline 1 // pixel sample data for channel 1 for scanline 1 // pixel sample data for channel ... for scanline 1 // pixel sample data for channel n for scanline 1 // ... for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned short *line_ptr = reinterpret_cast<unsigned short *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { FP16 hf; // hf.u = line_ptr[u]; // use `cpy` to avoid unaligned memory access when compiler's // optimization is on. 
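// (tinyexr::cpy2()/cpy4() are small byte-wise copy helpers defined earlier
//  in this file; routing loads through them keeps the access alignment-safe
//  even when line_ptr is not naturally aligned.)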
            tinyexr::cpy2(&(hf.u), line_ptr + u);

            tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));

            if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
              unsigned short *image =
                  reinterpret_cast<unsigned short **>(out_images)[c];
              if (line_order == 0) {
                image += (static_cast<size_t>(line_no) + v) *
                             static_cast<size_t>(x_stride) +
                         u;
              } else {
                image += static_cast<size_t>(
                             (height - 1 - (line_no + static_cast<int>(v)))) *
                             static_cast<size_t>(x_stride) +
                         u;
              }
              *image = hf.u;
            } else {  // HALF -> FLOAT
              FP32 f32 = half_to_float(hf);
              float *image = reinterpret_cast<float **>(out_images)[c];
              size_t offset = 0;
              if (line_order == 0) {
                offset = (static_cast<size_t>(line_no) + v) *
                             static_cast<size_t>(x_stride) +
                         u;
              } else {
                offset = static_cast<size_t>(
                             (height - 1 - (line_no + static_cast<int>(v)))) *
                             static_cast<size_t>(x_stride) +
                         u;
              }
              image += offset;
              *image = f32.f;
            }
          }
        }
      } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
        assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT);

        for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
          const unsigned int *line_ptr = reinterpret_cast<unsigned int *>(
              &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
                         channel_offset_list[c] * static_cast<size_t>(width)));
          for (size_t u = 0; u < static_cast<size_t>(width); u++) {
            unsigned int val;
            // val = line_ptr[u];
            tinyexr::cpy4(&val, line_ptr + u);

            tinyexr::swap4(&val);

            unsigned int *image =
                reinterpret_cast<unsigned int **>(out_images)[c];
            if (line_order == 0) {
              image += (static_cast<size_t>(line_no) + v) *
                           static_cast<size_t>(x_stride) +
                       u;
            } else {
              image += static_cast<size_t>(
                           (height - 1 - (line_no + static_cast<int>(v)))) *
                           static_cast<size_t>(x_stride) +
                       u;
            }
            *image = val;
          }
        }
      } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
        assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);

        for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
          // Index into the decompressed buffer with `width` (not `x_stride`):
          // the buffer is written tightly at `width` pixels per scanline,
          // matching the HALF and UINT branches above.
          const float *line_ptr = reinterpret_cast<float *>(&outBuf.at(
              v * pixel_data_size * static_cast<size_t>(width) +
              channel_offset_list[c] * static_cast<size_t>(width)));
          for (size_t u = 0; u < static_cast<size_t>(width); u++) {
            float val;
            // val = line_ptr[u];
            tinyexr::cpy4(&val, line_ptr + u);

            tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));

            float *image = reinterpret_cast<float **>(out_images)[c];
            if (line_order == 0) {
              image += (static_cast<size_t>(line_no) + v) *
                           static_cast<size_t>(x_stride) +
                       u;
            } else {
              image += static_cast<size_t>(
                           (height - 1 - (line_no + static_cast<int>(v)))) *
                           static_cast<size_t>(x_stride) +
                       u;
            }
            *image = val;
          }
        }
      } else {
        assert(0);
      }
    }
#else
    assert(0 && "PIZ is not enabled in this build");
    return false;
#endif
  } else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS ||
             compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
    // Allocate original data size.
    std::vector<unsigned char> outBuf(static_cast<size_t>(width) *
                                      static_cast<size_t>(num_lines) *
                                      pixel_data_size);

    unsigned long dstLen = static_cast<unsigned long>(outBuf.size());
    assert(dstLen > 0);
    if (!tinyexr::DecompressZip(
            reinterpret_cast<unsigned char *>(&outBuf.at(0)), &dstLen,
            data_ptr, static_cast<unsigned long>(data_len))) {
      return false;
    }

    // For ZIP_COMPRESSION:
    // pixel sample data for channel 0 for scanline 0
    // pixel sample data for channel 1 for scanline 0
    // pixel sample data for channel ... for scanline 0
    // pixel sample data for channel n for scanline 0
    // pixel sample data for channel 0 for scanline 1
    // pixel sample data for channel 1 for scanline 1
    // pixel sample data for channel ...
for scanline 1 // pixel sample data for channel n for scanline 1 // ... for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned short *line_ptr = reinterpret_cast<unsigned short *>( &outBuf.at(v * static_cast<size_t>(pixel_data_size) * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { tinyexr::FP16 hf; // hf.u = line_ptr[u]; tinyexr::cpy2(&(hf.u), line_ptr + u); tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u)); if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { unsigned short *image = reinterpret_cast<unsigned short **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = hf.u; } else { // HALF -> FLOAT tinyexr::FP32 f32 = half_to_float(hf); float *image = reinterpret_cast<float **>(out_images)[c]; size_t offset = 0; if (line_order == 0) { offset = (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { offset = (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } image += offset; *image = f32.f; } } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned int *line_ptr = reinterpret_cast<unsigned int *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { unsigned int val; // val = line_ptr[u]; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(&val); unsigned int *image = reinterpret_cast<unsigned int **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const float *line_ptr = reinterpret_cast<float *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { float val; // val = line_ptr[u]; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else { assert(0); return false; } } } else if (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) { // Allocate original data size. 
std::vector<unsigned char> outBuf(static_cast<size_t>(width) * static_cast<size_t>(num_lines) * pixel_data_size); unsigned long dstLen = static_cast<unsigned long>(outBuf.size()); if (dstLen == 0) { return false; } if (!tinyexr::DecompressRle( reinterpret_cast<unsigned char *>(&outBuf.at(0)), dstLen, data_ptr, static_cast<unsigned long>(data_len))) { return false; } // For RLE_COMPRESSION: // pixel sample data for channel 0 for scanline 0 // pixel sample data for channel 1 for scanline 0 // pixel sample data for channel ... for scanline 0 // pixel sample data for channel n for scanline 0 // pixel sample data for channel 0 for scanline 1 // pixel sample data for channel 1 for scanline 1 // pixel sample data for channel ... for scanline 1 // pixel sample data for channel n for scanline 1 // ... for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned short *line_ptr = reinterpret_cast<unsigned short *>( &outBuf.at(v * static_cast<size_t>(pixel_data_size) * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { tinyexr::FP16 hf; // hf.u = line_ptr[u]; tinyexr::cpy2(&(hf.u), line_ptr + u); tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u)); if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { unsigned short *image = reinterpret_cast<unsigned short **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = hf.u; } else { // HALF -> FLOAT tinyexr::FP32 f32 = half_to_float(hf); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = f32.f; } } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned int *line_ptr = reinterpret_cast<unsigned int *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { unsigned int val; // val = line_ptr[u]; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(&val); unsigned int *image = reinterpret_cast<unsigned int **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const float *line_ptr = reinterpret_cast<float *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { float val; // val = line_ptr[u]; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); float *image = 
reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else { assert(0); return false; } } } else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { #if TINYEXR_USE_ZFP tinyexr::ZFPCompressionParam zfp_compression_param; if (!FindZFPCompressionParam(&zfp_compression_param, attributes, num_attributes)) { assert(0); return false; } // Allocate original data size. std::vector<unsigned char> outBuf(static_cast<size_t>(width) * static_cast<size_t>(num_lines) * pixel_data_size); unsigned long dstLen = outBuf.size(); assert(dstLen > 0); tinyexr::DecompressZfp(reinterpret_cast<float *>(&outBuf.at(0)), width, num_lines, num_channels, data_ptr, static_cast<unsigned long>(data_len), zfp_compression_param); // For ZFP_COMPRESSION: // pixel sample data for channel 0 for scanline 0 // pixel sample data for channel 1 for scanline 0 // pixel sample data for channel ... for scanline 0 // pixel sample data for channel n for scanline 0 // pixel sample data for channel 0 for scanline 1 // pixel sample data for channel 1 for scanline 1 // pixel sample data for channel ... for scanline 1 // pixel sample data for channel n for scanline 1 // ... for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { assert(channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT); if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const float *line_ptr = reinterpret_cast<float *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { float val; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else { assert(0); return false; } } #else (void)attributes; (void)num_attributes; (void)num_channels; assert(0); return false; #endif } else if (compression_type == TINYEXR_COMPRESSIONTYPE_NONE) { for (size_t c = 0; c < num_channels; c++) { for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { const unsigned short *line_ptr = reinterpret_cast<const unsigned short *>( data_ptr + v * pixel_data_size * size_t(width) + channel_offset_list[c] * static_cast<size_t>(width)); if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { unsigned short *outLine = reinterpret_cast<unsigned short *>(out_images[c]); if (line_order == 0) { outLine += (size_t(y) + v) * size_t(x_stride); } else { outLine += (size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride); } for (int u = 0; u < width; u++) { tinyexr::FP16 hf; // hf.u = line_ptr[u]; tinyexr::cpy2(&(hf.u), line_ptr + u); tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u)); outLine[u] = hf.u; } } else if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) { float *outLine = reinterpret_cast<float *>(out_images[c]); if (line_order == 0) { outLine += (size_t(y) + v) * size_t(x_stride); } else { 
            outLine +=
                (size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride);
          }

          if (reinterpret_cast<const unsigned char *>(line_ptr + width) >
              (data_ptr + data_len)) {
            // Insufficient data size
            return false;
          }

          for (int u = 0; u < width; u++) {
            tinyexr::FP16 hf;

            // address may not be aligned. use byte-wise copy for safety. #76
            // hf.u = line_ptr[u];
            tinyexr::cpy2(&(hf.u), line_ptr + u);

            tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));

            tinyexr::FP32 f32 = half_to_float(hf);

            outLine[u] = f32.f;
          }
        } else {
          assert(0);
          return false;
        }
      } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
        const float *line_ptr = reinterpret_cast<const float *>(
            data_ptr + v * pixel_data_size * size_t(width) +
            channel_offset_list[c] * static_cast<size_t>(width));

        float *outLine = reinterpret_cast<float *>(out_images[c]);
        if (line_order == 0) {
          outLine += (size_t(y) + v) * size_t(x_stride);
        } else {
          outLine += (size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride);
        }

        if (reinterpret_cast<const unsigned char *>(line_ptr + width) >
            (data_ptr + data_len)) {
          // Insufficient data size
          return false;
        }

        for (int u = 0; u < width; u++) {
          float val;
          tinyexr::cpy4(&val, line_ptr + u);

          tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));

          outLine[u] = val;
        }
      } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
        const unsigned int *line_ptr = reinterpret_cast<const unsigned int *>(
            data_ptr + v * pixel_data_size * size_t(width) +
            channel_offset_list[c] * static_cast<size_t>(width));

        unsigned int *outLine = reinterpret_cast<unsigned int *>(out_images[c]);
        if (line_order == 0) {
          outLine += (size_t(y) + v) * size_t(x_stride);
        } else {
          outLine += (size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride);
        }

        for (int u = 0; u < width; u++) {
          if (reinterpret_cast<const unsigned char *>(line_ptr + u) >=
              (data_ptr + data_len)) {
            // Corrupted data?
            return false;
          }

          unsigned int val;
          tinyexr::cpy4(&val, line_ptr + u);

          tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));

          outLine[u] = val;
        }
      }
    }
  }

  return true;
}

static bool DecodeTiledPixelData(
    unsigned char **out_images, int *width, int *height,
    const int *requested_pixel_types, const unsigned char *data_ptr,
    size_t data_len, int compression_type, int line_order, int data_width,
    int data_height, int tile_offset_x, int tile_offset_y, int tile_size_x,
    int tile_size_y, size_t pixel_data_size, size_t num_attributes,
    const EXRAttribute *attributes, size_t num_channels,
    const EXRChannelInfo *channels,
    const std::vector<size_t> &channel_offset_list) {
  assert(tile_offset_x * tile_size_x < data_width);
  assert(tile_offset_y * tile_size_y < data_height);

  // Compute actual image size in a tile.
  if ((tile_offset_x + 1) * tile_size_x >= data_width) {
    (*width) = data_width - (tile_offset_x * tile_size_x);
  } else {
    (*width) = tile_size_x;
  }

  if ((tile_offset_y + 1) * tile_size_y >= data_height) {
    (*height) = data_height - (tile_offset_y * tile_size_y);
  } else {
    (*height) = tile_size_y;
  }

  // Image size = tile size.
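// Worked example of the clipping above: with data_width = 1000 and
// tile_size_x = 64, tiles 0..14 are 64 pixels wide, and the last tile
// (tile_offset_x = 15) is clipped to 1000 - 15 * 64 = 40 pixels.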
return DecodePixelData(out_images, requested_pixel_types, data_ptr, data_len, compression_type, line_order, (*width), tile_size_y, /* stride */ tile_size_x, /* y */ 0, /* line_no */ 0, (*height), pixel_data_size, num_attributes, attributes, num_channels, channels, channel_offset_list); } static bool ComputeChannelLayout(std::vector<size_t> *channel_offset_list, int *pixel_data_size, size_t *channel_offset, int num_channels, const EXRChannelInfo *channels) { channel_offset_list->resize(static_cast<size_t>(num_channels)); (*pixel_data_size) = 0; (*channel_offset) = 0; for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { (*channel_offset_list)[c] = (*channel_offset); if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { (*pixel_data_size) += sizeof(unsigned short); (*channel_offset) += sizeof(unsigned short); } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { (*pixel_data_size) += sizeof(float); (*channel_offset) += sizeof(float); } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { (*pixel_data_size) += sizeof(unsigned int); (*channel_offset) += sizeof(unsigned int); } else { // ??? return false; } } return true; } static unsigned char **AllocateImage(int num_channels, const EXRChannelInfo *channels, const int *requested_pixel_types, int data_width, int data_height) { unsigned char **images = reinterpret_cast<unsigned char **>(static_cast<float **>( malloc(sizeof(float *) * static_cast<size_t>(num_channels)))); for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { size_t data_len = static_cast<size_t>(data_width) * static_cast<size_t>(data_height); if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { // pixel_data_size += sizeof(unsigned short); // channel_offset += sizeof(unsigned short); // Alloc internal image for half type. if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { images[c] = reinterpret_cast<unsigned char *>(static_cast<unsigned short *>( malloc(sizeof(unsigned short) * data_len))); } else if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) { images[c] = reinterpret_cast<unsigned char *>( static_cast<float *>(malloc(sizeof(float) * data_len))); } else { assert(0); } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { // pixel_data_size += sizeof(float); // channel_offset += sizeof(float); images[c] = reinterpret_cast<unsigned char *>( static_cast<float *>(malloc(sizeof(float) * data_len))); } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { // pixel_data_size += sizeof(unsigned int); // channel_offset += sizeof(unsigned int); images[c] = reinterpret_cast<unsigned char *>( static_cast<unsigned int *>(malloc(sizeof(unsigned int) * data_len))); } else { assert(0); } } return images; } static int ParseEXRHeader(HeaderInfo *info, bool *empty_header, const EXRVersion *version, std::string *err, const unsigned char *buf, size_t size) { const char *marker = reinterpret_cast<const char *>(&buf[0]); if (empty_header) { (*empty_header) = false; } if (version->multipart) { if (size > 0 && marker[0] == '\0') { // End of header list. 
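// (A multi-part file stores a sequence of headers, each terminated by a
//  null byte; a lone null byte at this position means the header list
//  itself has ended.)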
if (empty_header) { (*empty_header) = true; } return TINYEXR_SUCCESS; } } // According to the spec, the header of every OpenEXR file must contain at // least the following attributes: // // channels chlist // compression compression // dataWindow box2i // displayWindow box2i // lineOrder lineOrder // pixelAspectRatio float // screenWindowCenter v2f // screenWindowWidth float bool has_channels = false; bool has_compression = false; bool has_data_window = false; bool has_display_window = false; bool has_line_order = false; bool has_pixel_aspect_ratio = false; bool has_screen_window_center = false; bool has_screen_window_width = false; info->data_window[0] = 0; info->data_window[1] = 0; info->data_window[2] = 0; info->data_window[3] = 0; info->line_order = 0; // @fixme info->display_window[0] = 0; info->display_window[1] = 0; info->display_window[2] = 0; info->display_window[3] = 0; info->screen_window_center[0] = 0.0f; info->screen_window_center[1] = 0.0f; info->screen_window_width = -1.0f; info->pixel_aspect_ratio = -1.0f; info->tile_size_x = -1; info->tile_size_y = -1; info->tile_level_mode = -1; info->tile_rounding_mode = -1; info->attributes.clear(); // Read attributes size_t orig_size = size; for (size_t nattr = 0; nattr < TINYEXR_MAX_HEADER_ATTRIBUTES; nattr++) { if (0 == size) { if (err) { (*err) += "Insufficient data size for attributes.\n"; } return TINYEXR_ERROR_INVALID_DATA; } else if (marker[0] == '\0') { size--; break; } std::string attr_name; std::string attr_type; std::vector<unsigned char> data; size_t marker_size; if (!tinyexr::ReadAttribute(&attr_name, &attr_type, &data, &marker_size, marker, size)) { if (err) { (*err) += "Failed to read attribute.\n"; } return TINYEXR_ERROR_INVALID_DATA; } marker += marker_size; size -= marker_size; if (version->tiled && attr_name.compare("tiles") == 0) { unsigned int x_size, y_size; unsigned char tile_mode; assert(data.size() == 9); memcpy(&x_size, &data.at(0), sizeof(int)); memcpy(&y_size, &data.at(4), sizeof(int)); tile_mode = data[8]; tinyexr::swap4(&x_size); tinyexr::swap4(&y_size); info->tile_size_x = static_cast<int>(x_size); info->tile_size_y = static_cast<int>(y_size); // mode = levelMode + roundingMode * 16 info->tile_level_mode = tile_mode & 0x3; info->tile_rounding_mode = (tile_mode >> 4) & 0x1; } else if (attr_name.compare("compression") == 0) { bool ok = false; if (data[0] < TINYEXR_COMPRESSIONTYPE_PIZ) { ok = true; } if (data[0] == TINYEXR_COMPRESSIONTYPE_PIZ) { #if TINYEXR_USE_PIZ ok = true; #else if (err) { (*err) = "PIZ compression is not supported."; } return TINYEXR_ERROR_UNSUPPORTED_FORMAT; #endif } if (data[0] == TINYEXR_COMPRESSIONTYPE_ZFP) { #if TINYEXR_USE_ZFP ok = true; #else if (err) { (*err) = "ZFP compression is not supported."; } return TINYEXR_ERROR_UNSUPPORTED_FORMAT; #endif } if (!ok) { if (err) { (*err) = "Unknown compression type."; } return TINYEXR_ERROR_UNSUPPORTED_FORMAT; } info->compression_type = static_cast<int>(data[0]); has_compression = true; } else if (attr_name.compare("channels") == 0) { // name: zero-terminated string, from 1 to 255 bytes long // pixel type: int, possible values are: UINT = 0 HALF = 1 FLOAT = 2 // pLinear: unsigned char, possible values are 0 and 1 // reserved: three chars, should be zero // xSampling: int // ySampling: int if (!ReadChannelInfo(info->channels, data)) { if (err) { (*err) += "Failed to parse channel info.\n"; } return TINYEXR_ERROR_INVALID_DATA; } if (info->channels.size() < 1) { if (err) { (*err) += "# of channels is zero.\n"; } return 
TINYEXR_ERROR_INVALID_DATA; } has_channels = true; } else if (attr_name.compare("dataWindow") == 0) { if (data.size() >= 16) { memcpy(&info->data_window[0], &data.at(0), sizeof(int)); memcpy(&info->data_window[1], &data.at(4), sizeof(int)); memcpy(&info->data_window[2], &data.at(8), sizeof(int)); memcpy(&info->data_window[3], &data.at(12), sizeof(int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[0])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[1])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[2])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[3])); has_data_window = true; } } else if (attr_name.compare("displayWindow") == 0) { if (data.size() >= 16) { memcpy(&info->display_window[0], &data.at(0), sizeof(int)); memcpy(&info->display_window[1], &data.at(4), sizeof(int)); memcpy(&info->display_window[2], &data.at(8), sizeof(int)); memcpy(&info->display_window[3], &data.at(12), sizeof(int)); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->display_window[0])); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->display_window[1])); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->display_window[2])); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->display_window[3])); has_display_window = true; } } else if (attr_name.compare("lineOrder") == 0) { if (data.size() >= 1) { info->line_order = static_cast<int>(data[0]); has_line_order = true; } } else if (attr_name.compare("pixelAspectRatio") == 0) { if (data.size() >= sizeof(float)) { memcpy(&info->pixel_aspect_ratio, &data.at(0), sizeof(float)); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->pixel_aspect_ratio)); has_pixel_aspect_ratio = true; } } else if (attr_name.compare("screenWindowCenter") == 0) { if (data.size() >= 8) { memcpy(&info->screen_window_center[0], &data.at(0), sizeof(float)); memcpy(&info->screen_window_center[1], &data.at(4), sizeof(float)); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->screen_window_center[0])); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->screen_window_center[1])); has_screen_window_center = true; } } else if (attr_name.compare("screenWindowWidth") == 0) { if (data.size() >= sizeof(float)) { memcpy(&info->screen_window_width, &data.at(0), sizeof(float)); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->screen_window_width)); has_screen_window_width = true; } } else if (attr_name.compare("chunkCount") == 0) { if (data.size() >= sizeof(int)) { memcpy(&info->chunk_count, &data.at(0), sizeof(int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->chunk_count)); } } else { // Custom attribute(up to TINYEXR_MAX_CUSTOM_ATTRIBUTES) if (info->attributes.size() < TINYEXR_MAX_CUSTOM_ATTRIBUTES) { EXRAttribute attrib; #ifdef _MSC_VER strncpy_s(attrib.name, attr_name.c_str(), 255); strncpy_s(attrib.type, attr_type.c_str(), 255); #else strncpy(attrib.name, attr_name.c_str(), 255); strncpy(attrib.type, attr_type.c_str(), 255); #endif attrib.name[255] = '\0'; attrib.type[255] = '\0'; attrib.size = static_cast<int>(data.size()); attrib.value = static_cast<unsigned char *>(malloc(data.size())); memcpy(reinterpret_cast<char *>(attrib.value), &data.at(0), data.size()); info->attributes.push_back(attrib); } } } // Check if required attributes exist { std::stringstream ss_err; if (!has_compression) { ss_err << "\"compression\" attribute not found in the header." << std::endl; } if (!has_channels) { ss_err << "\"channels\" attribute not found in the header." 
<< std::endl; } if (!has_line_order) { ss_err << "\"lineOrder\" attribute not found in the header." << std::endl; } if (!has_display_window) { ss_err << "\"displayWindow\" attribute not found in the header." << std::endl; } if (!has_data_window) { ss_err << "\"dataWindow\" attribute not found in the header or invalid." << std::endl; } if (!has_pixel_aspect_ratio) { ss_err << "\"pixelAspectRatio\" attribute not found in the header." << std::endl; } if (!has_screen_window_width) { ss_err << "\"screenWindowWidth\" attribute not found in the header." << std::endl; } if (!has_screen_window_center) { ss_err << "\"screenWindowCenter\" attribute not found in the header." << std::endl; } if (!(ss_err.str().empty())) { if (err) { (*err) += ss_err.str(); } return TINYEXR_ERROR_INVALID_HEADER; } } info->header_len = static_cast<unsigned int>(orig_size - size); return TINYEXR_SUCCESS; } // C++ HeaderInfo to C EXRHeader conversion. static void ConvertHeader(EXRHeader *exr_header, const HeaderInfo &info) { exr_header->pixel_aspect_ratio = info.pixel_aspect_ratio; exr_header->screen_window_center[0] = info.screen_window_center[0]; exr_header->screen_window_center[1] = info.screen_window_center[1]; exr_header->screen_window_width = info.screen_window_width; exr_header->chunk_count = info.chunk_count; exr_header->display_window[0] = info.display_window[0]; exr_header->display_window[1] = info.display_window[1]; exr_header->display_window[2] = info.display_window[2]; exr_header->display_window[3] = info.display_window[3]; exr_header->data_window[0] = info.data_window[0]; exr_header->data_window[1] = info.data_window[1]; exr_header->data_window[2] = info.data_window[2]; exr_header->data_window[3] = info.data_window[3]; exr_header->line_order = info.line_order; exr_header->compression_type = info.compression_type; exr_header->tile_size_x = info.tile_size_x; exr_header->tile_size_y = info.tile_size_y; exr_header->tile_level_mode = info.tile_level_mode; exr_header->tile_rounding_mode = info.tile_rounding_mode; exr_header->num_channels = static_cast<int>(info.channels.size()); exr_header->channels = static_cast<EXRChannelInfo *>(malloc( sizeof(EXRChannelInfo) * static_cast<size_t>(exr_header->num_channels))); for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) { #ifdef _MSC_VER strncpy_s(exr_header->channels[c].name, info.channels[c].name.c_str(), 255); #else strncpy(exr_header->channels[c].name, info.channels[c].name.c_str(), 255); #endif // manually add '\0' for safety. 
  exr_header->channels[c].name[255] = '\0';

  exr_header->channels[c].pixel_type = info.channels[c].pixel_type;
  exr_header->channels[c].p_linear = info.channels[c].p_linear;
  exr_header->channels[c].x_sampling = info.channels[c].x_sampling;
  exr_header->channels[c].y_sampling = info.channels[c].y_sampling;
}

exr_header->pixel_types = static_cast<int *>(
    malloc(sizeof(int) * static_cast<size_t>(exr_header->num_channels)));
for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
  exr_header->pixel_types[c] = info.channels[c].pixel_type;
}

// Initially fill with values of `pixel_types`
exr_header->requested_pixel_types = static_cast<int *>(
    malloc(sizeof(int) * static_cast<size_t>(exr_header->num_channels)));
for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
  exr_header->requested_pixel_types[c] = info.channels[c].pixel_type;
}

exr_header->num_custom_attributes = static_cast<int>(info.attributes.size());

if (exr_header->num_custom_attributes > 0) {
  // TODO(syoyo): Report warning when # of attributes exceeds
  // `TINYEXR_MAX_CUSTOM_ATTRIBUTES`
  if (exr_header->num_custom_attributes > TINYEXR_MAX_CUSTOM_ATTRIBUTES) {
    exr_header->num_custom_attributes = TINYEXR_MAX_CUSTOM_ATTRIBUTES;
  }

  exr_header->custom_attributes = static_cast<EXRAttribute *>(malloc(
      sizeof(EXRAttribute) * size_t(exr_header->num_custom_attributes)));

  for (size_t i = 0; i < info.attributes.size(); i++) {
    memcpy(exr_header->custom_attributes[i].name, info.attributes[i].name,
           256);
    memcpy(exr_header->custom_attributes[i].type, info.attributes[i].type,
           256);
    exr_header->custom_attributes[i].size = info.attributes[i].size;
    // Just copy pointer
    exr_header->custom_attributes[i].value = info.attributes[i].value;
  }

} else {
  exr_header->custom_attributes = NULL;
}

exr_header->header_len = info.header_len;
}

static int DecodeChunk(EXRImage *exr_image, const EXRHeader *exr_header,
                       const std::vector<tinyexr::tinyexr_uint64> &offsets,
                       const unsigned char *head, const size_t size,
                       std::string *err) {
  int num_channels = exr_header->num_channels;

  int num_scanline_blocks = 1;
  if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
    num_scanline_blocks = 16;
  } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
    num_scanline_blocks = 32;
  } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
    num_scanline_blocks = 16;
  }

  int data_width = exr_header->data_window[2] - exr_header->data_window[0] + 1;
  int data_height =
      exr_header->data_window[3] - exr_header->data_window[1] + 1;

  if ((data_width < 0) || (data_height < 0)) {
    if (err) {
      std::stringstream ss;
      ss << "Invalid data width or data height: " << data_width << ", "
         << data_height << std::endl;
      (*err) += ss.str();
    }
    return TINYEXR_ERROR_INVALID_DATA;
  }

  // Do not allow too large data_width and data_height. header invalid?
  {
    const int threshold = 1024 * 8192;  // heuristics
    if ((data_width > threshold) || (data_height > threshold)) {
      if (err) {
        std::stringstream ss;
        ss << "data_width or data_height too large. 
data_width: " << data_width << ", " << "data_height = " << data_height << std::endl; (*err) += ss.str(); } return TINYEXR_ERROR_INVALID_DATA; } } size_t num_blocks = offsets.size(); std::vector<size_t> channel_offset_list; int pixel_data_size = 0; size_t channel_offset = 0; if (!tinyexr::ComputeChannelLayout(&channel_offset_list, &pixel_data_size, &channel_offset, num_channels, exr_header->channels)) { if (err) { (*err) += "Failed to compute channel layout.\n"; } return TINYEXR_ERROR_INVALID_DATA; } bool invalid_data = false; // TODO(LTE): Use atomic lock for MT safety. if (exr_header->tiled) { // value check if (exr_header->tile_size_x < 0) { if (err) { std::stringstream ss; ss << "Invalid tile size x : " << exr_header->tile_size_x << "\n"; (*err) += ss.str(); } return TINYEXR_ERROR_INVALID_HEADER; } if (exr_header->tile_size_y < 0) { if (err) { std::stringstream ss; ss << "Invalid tile size y : " << exr_header->tile_size_y << "\n"; (*err) += ss.str(); } return TINYEXR_ERROR_INVALID_HEADER; } size_t num_tiles = offsets.size(); // = # of blocks exr_image->tiles = static_cast<EXRTile *>( calloc(sizeof(EXRTile), static_cast<size_t>(num_tiles))); int err_code = TINYEXR_SUCCESS; #if (__cplusplus > 199711L) && (TINYEXR_USE_THREAD > 0) std::vector<std::thread> workers; std::atomic<size_t> tile_count(0); int num_threads = std::max(1, int(std::thread::hardware_concurrency())); if (num_threads > int(num_tiles)) { num_threads = int(num_tiles); } for (int t = 0; t < num_threads; t++) { workers.emplace_back(std::thread([&]() { size_t tile_idx = 0; while ((tile_idx = tile_count++) < num_tiles) { #else for (size_t tile_idx = 0; tile_idx < num_tiles; tile_idx++) { #endif // Allocate memory for each tile. exr_image->tiles[tile_idx].images = tinyexr::AllocateImage( num_channels, exr_header->channels, exr_header->requested_pixel_types, exr_header->tile_size_x, exr_header->tile_size_y); // 16 byte: tile coordinates // 4 byte : data size // ~ : data(uncompressed or compressed) if (offsets[tile_idx] + sizeof(int) * 5 > size) { // TODO(LTE): atomic if (err) { (*err) += "Insufficient data size.\n"; } err_code = TINYEXR_ERROR_INVALID_DATA; break; } size_t data_size = size_t(size - (offsets[tile_idx] + sizeof(int) * 5)); const unsigned char *data_ptr = reinterpret_cast<const unsigned char *>(head + offsets[tile_idx]); int tile_coordinates[4]; memcpy(tile_coordinates, data_ptr, sizeof(int) * 4); tinyexr::swap4( reinterpret_cast<unsigned int *>(&tile_coordinates[0])); tinyexr::swap4( reinterpret_cast<unsigned int *>(&tile_coordinates[1])); tinyexr::swap4( reinterpret_cast<unsigned int *>(&tile_coordinates[2])); tinyexr::swap4( reinterpret_cast<unsigned int *>(&tile_coordinates[3])); // @todo{ LoD } if (tile_coordinates[2] != 0) { err_code = TINYEXR_ERROR_UNSUPPORTED_FEATURE; break; } if (tile_coordinates[3] != 0) { err_code = TINYEXR_ERROR_UNSUPPORTED_FEATURE; break; } int data_len; memcpy(&data_len, data_ptr + 16, sizeof(int)); // 16 = sizeof(tile_coordinates) tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len)); if (data_len < 4 || size_t(data_len) > data_size) { // TODO(LTE): atomic if (err) { (*err) += "Insufficient data length.\n"; } err_code = TINYEXR_ERROR_INVALID_DATA; break; } // Move to data addr: 20 = 16 + 4; data_ptr += 20; bool ret = tinyexr::DecodeTiledPixelData( exr_image->tiles[tile_idx].images, &(exr_image->tiles[tile_idx].width), &(exr_image->tiles[tile_idx].height), exr_header->requested_pixel_types, data_ptr, static_cast<size_t>(data_len), exr_header->compression_type, 
exr_header->line_order, data_width, data_height, tile_coordinates[0], tile_coordinates[1], exr_header->tile_size_x, exr_header->tile_size_y, static_cast<size_t>(pixel_data_size), static_cast<size_t>(exr_header->num_custom_attributes), exr_header->custom_attributes, static_cast<size_t>(exr_header->num_channels), exr_header->channels, channel_offset_list); if (!ret) { // TODO(LTE): atomic if (err) { (*err) += "Failed to decode tile data.\n"; } err_code = TINYEXR_ERROR_INVALID_DATA; } exr_image->tiles[tile_idx].offset_x = tile_coordinates[0]; exr_image->tiles[tile_idx].offset_y = tile_coordinates[1]; exr_image->tiles[tile_idx].level_x = tile_coordinates[2]; exr_image->tiles[tile_idx].level_y = tile_coordinates[3]; #if (__cplusplus > 199711L) && (TINYEXR_USE_THREAD > 0) } })); } // num_thread loop for (auto &t : workers) { t.join(); } #else } #endif if (err_code != TINYEXR_SUCCESS) { return err_code; } exr_image->num_tiles = static_cast<int>(num_tiles); } else { // scanline format // Don't allow too large image(256GB * pixel_data_size or more). Workaround // for #104. size_t total_data_len = size_t(data_width) * size_t(data_height) * size_t(num_channels); const bool total_data_len_overflown = sizeof(void *) == 8 ? (total_data_len >= 0x4000000000) : false; if ((total_data_len == 0) || total_data_len_overflown) { if (err) { std::stringstream ss; ss << "Image data size is zero or too large: width = " << data_width << ", height = " << data_height << ", channels = " << num_channels << std::endl; (*err) += ss.str(); } return TINYEXR_ERROR_INVALID_DATA; } exr_image->images = tinyexr::AllocateImage( num_channels, exr_header->channels, exr_header->requested_pixel_types, data_width, data_height); #if (__cplusplus > 199711L) && (TINYEXR_USE_THREAD > 0) std::vector<std::thread> workers; std::atomic<int> y_count(0); int num_threads = std::max(1, int(std::thread::hardware_concurrency())); if (num_threads > int(num_blocks)) { num_threads = int(num_blocks); } for (int t = 0; t < num_threads; t++) { workers.emplace_back(std::thread([&]() { int y = 0; while ((y = y_count++) < int(num_blocks)) { #else #if TINYEXR_USE_OPENMP #pragma omp parallel for #endif for (int y = 0; y < static_cast<int>(num_blocks); y++) { #endif size_t y_idx = static_cast<size_t>(y); if (offsets[y_idx] + sizeof(int) * 2 > size) { invalid_data = true; } else { // 4 byte: scan line // 4 byte: data size // ~ : pixel data(uncompressed or compressed) size_t data_size = size_t(size - (offsets[y_idx] + sizeof(int) * 2)); const unsigned char *data_ptr = reinterpret_cast<const unsigned char *>(head + offsets[y_idx]); int line_no; memcpy(&line_no, data_ptr, sizeof(int)); int data_len; memcpy(&data_len, data_ptr + 4, sizeof(int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&line_no)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len)); if (size_t(data_len) > data_size) { invalid_data = true; } else if ((line_no > (2 << 20)) || (line_no < -(2 << 20))) { // Too large value. Assume this is invalid // 2**20 = 1048576 = heuristic value. invalid_data = true; } else if (data_len == 0) { // TODO(syoyo): May be ok to raise the threshold for example // `data_len < 4` invalid_data = true; } else { // line_no may be negative. 
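// Example: with ZIP compression (num_scanline_blocks = 16) and
// data_window[3] = 99, a block starting at line_no = 96 decodes
// min(96 + 16, 99 + 1) - 96 = 4 scanlines.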
int end_line_no = (std::min)(line_no + num_scanline_blocks, (exr_header->data_window[3] + 1)); int num_lines = end_line_no - line_no; if (num_lines <= 0) { invalid_data = true; } else { // Move to data addr: 8 = 4 + 4; data_ptr += 8; // Adjust line_no with data_window.bmin.y // overflow check tinyexr_int64 lno = static_cast<tinyexr_int64>(line_no) - static_cast<tinyexr_int64>(exr_header->data_window[1]); if (lno > std::numeric_limits<int>::max()) { line_no = -1; // invalid } else if (lno < -std::numeric_limits<int>::max()) { line_no = -1; // invalid } else { line_no -= exr_header->data_window[1]; } if (line_no < 0) { invalid_data = true; } else { if (!tinyexr::DecodePixelData( exr_image->images, exr_header->requested_pixel_types, data_ptr, static_cast<size_t>(data_len), exr_header->compression_type, exr_header->line_order, data_width, data_height, data_width, y, line_no, num_lines, static_cast<size_t>(pixel_data_size), static_cast<size_t>( exr_header->num_custom_attributes), exr_header->custom_attributes, static_cast<size_t>(exr_header->num_channels), exr_header->channels, channel_offset_list)) { invalid_data = true; } } } } } #if (__cplusplus > 199711L) && (TINYEXR_USE_THREAD > 0) } })); } for (auto &t : workers) { t.join(); } #else } // omp parallel #endif } if (invalid_data) { if (err) { std::stringstream ss; (*err) += "Invalid data found when decoding pixels.\n"; } return TINYEXR_ERROR_INVALID_DATA; } // Overwrite `pixel_type` with `requested_pixel_type`. { for (int c = 0; c < exr_header->num_channels; c++) { exr_header->pixel_types[c] = exr_header->requested_pixel_types[c]; } } { exr_image->num_channels = num_channels; exr_image->width = data_width; exr_image->height = data_height; } return TINYEXR_SUCCESS; } static bool ReconstructLineOffsets( std::vector<tinyexr::tinyexr_uint64> *offsets, size_t n, const unsigned char *head, const unsigned char *marker, const size_t size) { assert(head < marker); assert(offsets->size() == n); for (size_t i = 0; i < n; i++) { size_t offset = static_cast<size_t>(marker - head); // Offset should not exceed whole EXR file/data size. 
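// Each scanline chunk is laid out as
//   [ int y ][ int data_len ][ data_len bytes of pixel data ]
// so the loop below walks the chunks by advancing data_len + 8 bytes
// per iteration and records each chunk's offset as it goes.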
if ((offset + sizeof(tinyexr::tinyexr_uint64)) >= size) { return false; } int y; unsigned int data_len; memcpy(&y, marker, sizeof(int)); memcpy(&data_len, marker + 4, sizeof(unsigned int)); if (data_len >= size) { return false; } tinyexr::swap4(reinterpret_cast<unsigned int *>(&y)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len)); (*offsets)[i] = offset; marker += data_len + 8; // 8 = 4 bytes(y) + 4 bytes(data_len) } return true; } static int DecodeEXRImage(EXRImage *exr_image, const EXRHeader *exr_header, const unsigned char *head, const unsigned char *marker, const size_t size, const char **err) { if (exr_image == NULL || exr_header == NULL || head == NULL || marker == NULL || (size <= tinyexr::kEXRVersionSize)) { tinyexr::SetErrorMessage("Invalid argument for DecodeEXRImage().", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } int num_scanline_blocks = 1; if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) { num_scanline_blocks = 16; } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { num_scanline_blocks = 32; } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { num_scanline_blocks = 16; } int data_width = exr_header->data_window[2] - exr_header->data_window[0]; if (data_width >= std::numeric_limits<int>::max()) { // Issue 63 tinyexr::SetErrorMessage("Invalid data width value", err); return TINYEXR_ERROR_INVALID_DATA; } data_width++; int data_height = exr_header->data_window[3] - exr_header->data_window[1]; if (data_height >= std::numeric_limits<int>::max()) { tinyexr::SetErrorMessage("Invalid data height value", err); return TINYEXR_ERROR_INVALID_DATA; } data_height++; if ((data_width < 0) || (data_height < 0)) { tinyexr::SetErrorMessage("data width or data height is negative.", err); return TINYEXR_ERROR_INVALID_DATA; } // Do not allow too large data_width and data_height. header invalid? { const int threshold = 1024 * 8192; // heuristics if (data_width > threshold) { tinyexr::SetErrorMessage("data width too large.", err); return TINYEXR_ERROR_INVALID_DATA; } if (data_height > threshold) { tinyexr::SetErrorMessage("data height too large.", err); return TINYEXR_ERROR_INVALID_DATA; } } // Read offset tables. size_t num_blocks = 0; if (exr_header->chunk_count > 0) { // Use `chunkCount` attribute. 
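// (chunkCount gives the number of chunks directly; per the OpenEXR spec the
//  attribute is required for multi-part files. Without it, the block count
//  is derived from the tile or scanline geometry below.)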
num_blocks = static_cast<size_t>(exr_header->chunk_count); } else if (exr_header->tiled) { // @todo { LoD } size_t num_x_tiles = static_cast<size_t>(data_width) / static_cast<size_t>(exr_header->tile_size_x); if (num_x_tiles * static_cast<size_t>(exr_header->tile_size_x) < static_cast<size_t>(data_width)) { num_x_tiles++; } size_t num_y_tiles = static_cast<size_t>(data_height) / static_cast<size_t>(exr_header->tile_size_y); if (num_y_tiles * static_cast<size_t>(exr_header->tile_size_y) < static_cast<size_t>(data_height)) { num_y_tiles++; } num_blocks = num_x_tiles * num_y_tiles; } else { num_blocks = static_cast<size_t>(data_height) / static_cast<size_t>(num_scanline_blocks); if (num_blocks * static_cast<size_t>(num_scanline_blocks) < static_cast<size_t>(data_height)) { num_blocks++; } } std::vector<tinyexr::tinyexr_uint64> offsets(num_blocks); for (size_t y = 0; y < num_blocks; y++) { tinyexr::tinyexr_uint64 offset; // Issue #81 if ((marker + sizeof(tinyexr_uint64)) >= (head + size)) { tinyexr::SetErrorMessage("Insufficient data size in offset table.", err); return TINYEXR_ERROR_INVALID_DATA; } memcpy(&offset, marker, sizeof(tinyexr::tinyexr_uint64)); tinyexr::swap8(&offset); if (offset >= size) { tinyexr::SetErrorMessage("Invalid offset value in DecodeEXRImage.", err); return TINYEXR_ERROR_INVALID_DATA; } marker += sizeof(tinyexr::tinyexr_uint64); // = 8 offsets[y] = offset; } // If line offsets are invalid, we try to reconstruct it. // See OpenEXR/IlmImf/ImfScanLineInputFile.cpp::readLineOffsets() for details. for (size_t y = 0; y < num_blocks; y++) { if (offsets[y] <= 0) { // TODO(syoyo) Report as warning? // if (err) { // stringstream ss; // ss << "Incomplete lineOffsets." << std::endl; // (*err) += ss.str(); //} bool ret = ReconstructLineOffsets(&offsets, num_blocks, head, marker, size); if (ret) { // OK break; } else { tinyexr::SetErrorMessage( "Cannot reconstruct lineOffset table in DecodeEXRImage.", err); return TINYEXR_ERROR_INVALID_DATA; } } } { std::string e; int ret = DecodeChunk(exr_image, exr_header, offsets, head, size, &e); if (ret != TINYEXR_SUCCESS) { if (!e.empty()) { tinyexr::SetErrorMessage(e, err); } #if 1 FreeEXRImage(exr_image); #else // release memory(if exists) if ((exr_header->num_channels > 0) && exr_image && exr_image->images) { for (size_t c = 0; c < size_t(exr_header->num_channels); c++) { if (exr_image->images[c]) { free(exr_image->images[c]); exr_image->images[c] = NULL; } } free(exr_image->images); exr_image->images = NULL; } #endif } return ret; } } static void GetLayers(const EXRHeader& exr_header, std::vector<std::string>& layer_names) { // Naive implementation // Group channels by layers // go over all channel names, split by periods // collect unique names layer_names.clear(); for (int c = 0; c < exr_header.num_channels; c++) { std::string full_name(exr_header.channels[c].name); const size_t pos = full_name.find_last_of('.'); if (pos != std::string::npos && pos != 0 && pos + 1 < full_name.size()) { full_name.erase(pos); if (std::find(layer_names.begin(), layer_names.end(), full_name) == layer_names.end()) layer_names.push_back(full_name); } } } struct LayerChannel { explicit LayerChannel (size_t i, std::string n) : index(i) , name(n) {} size_t index; std::string name; }; static void ChannelsInLayer(const EXRHeader& exr_header, const std::string layer_name, std::vector<LayerChannel>& channels) { channels.clear(); for (int c = 0; c < exr_header.num_channels; c++) { std::string ch_name(exr_header.channels[c].name); if (layer_name.empty()) { const size_t 
pos = ch_name.find_last_of('.'); if (pos != std::string::npos && pos < ch_name.size()) { ch_name = ch_name.substr(pos + 1); } } else { const size_t pos = ch_name.find(layer_name + '.'); if (pos == std::string::npos) continue; if (pos == 0) { ch_name = ch_name.substr(layer_name.size() + 1); } } LayerChannel ch(size_t(c), ch_name); channels.push_back(ch); } } } // namespace tinyexr int EXRLayers(const char *filename, const char **layer_names[], int *num_layers, const char **err) { EXRVersion exr_version; EXRHeader exr_header; InitEXRHeader(&exr_header); { int ret = ParseEXRVersionFromFile(&exr_version, filename); if (ret != TINYEXR_SUCCESS) { tinyexr::SetErrorMessage("Invalid EXR header.", err); return ret; } if (exr_version.multipart || exr_version.non_image) { tinyexr::SetErrorMessage( "Loading multipart or DeepImage is not supported in LoadEXR() API", err); return TINYEXR_ERROR_INVALID_DATA; // @fixme. } } int ret = ParseEXRHeaderFromFile(&exr_header, &exr_version, filename, err); if (ret != TINYEXR_SUCCESS) { FreeEXRHeader(&exr_header); return ret; } std::vector<std::string> layer_vec; tinyexr::GetLayers(exr_header, layer_vec); (*num_layers) = int(layer_vec.size()); (*layer_names) = static_cast<const char **>( malloc(sizeof(const char *) * static_cast<size_t>(layer_vec.size()))); for (size_t c = 0; c < static_cast<size_t>(layer_vec.size()); c++) { #ifdef _MSC_VER (*layer_names)[c] = _strdup(layer_vec[c].c_str()); #else (*layer_names)[c] = strdup(layer_vec[c].c_str()); #endif } FreeEXRHeader(&exr_header); return TINYEXR_SUCCESS; } int LoadEXR(float **out_rgba, int *width, int *height, const char *filename, const char **err) { return LoadEXRWithLayer(out_rgba, width, height, filename, /* layername */NULL, err); } int LoadEXRWithLayer(float **out_rgba, int *width, int *height, const char *filename, const char *layername, const char **err) { if (out_rgba == NULL) { tinyexr::SetErrorMessage("Invalid argument for LoadEXR()", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } EXRVersion exr_version; EXRImage exr_image; EXRHeader exr_header; InitEXRHeader(&exr_header); InitEXRImage(&exr_image); { int ret = ParseEXRVersionFromFile(&exr_version, filename); if (ret != TINYEXR_SUCCESS) { std::stringstream ss; ss << "Failed to open EXR file or read version info from EXR file. code(" << ret << ")"; tinyexr::SetErrorMessage(ss.str(), err); return ret; } if (exr_version.multipart || exr_version.non_image) { tinyexr::SetErrorMessage( "Loading multipart or DeepImage is not supported in LoadEXR() API", err); return TINYEXR_ERROR_INVALID_DATA; // @fixme. } } { int ret = ParseEXRHeaderFromFile(&exr_header, &exr_version, filename, err); if (ret != TINYEXR_SUCCESS) { FreeEXRHeader(&exr_header); return ret; } } // Read HALF channel as FLOAT. for (int i = 0; i < exr_header.num_channels; i++) { if (exr_header.pixel_types[i] == TINYEXR_PIXELTYPE_HALF) { exr_header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT; } } // TODO: Probably limit loading to layers (channels) selected by layer index { int ret = LoadEXRImageFromFile(&exr_image, &exr_header, filename, err); if (ret != TINYEXR_SUCCESS) { FreeEXRHeader(&exr_header); return ret; } } // RGBA int idxR = -1; int idxG = -1; int idxB = -1; int idxA = -1; std::vector<std::string> layer_names; tinyexr::GetLayers(exr_header, layer_names); std::vector<tinyexr::LayerChannel> channels; tinyexr::ChannelsInLayer(exr_header, layername == NULL ? 
"" : std::string(layername), channels); if (channels.size() < 1) { tinyexr::SetErrorMessage("Layer Not Found", err); FreeEXRHeader(&exr_header); FreeEXRImage(&exr_image); return TINYEXR_ERROR_LAYER_NOT_FOUND; } size_t ch_count = channels.size() < 4 ? channels.size() : 4; for (size_t c = 0; c < ch_count; c++) { const tinyexr::LayerChannel &ch = channels[c]; if (ch.name == "R") { idxR = int(ch.index); } else if (ch.name == "G") { idxG = int(ch.index); } else if (ch.name == "B") { idxB = int(ch.index); } else if (ch.name == "A") { idxA = int(ch.index); } } if (channels.size() == 1) { int chIdx = int(channels.front().index); // Grayscale channel only. (*out_rgba) = reinterpret_cast<float *>( malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) * static_cast<size_t>(exr_image.height))); if (exr_header.tiled) { for (int it = 0; it < exr_image.num_tiles; it++) { for (int j = 0; j < exr_header.tile_size_y; j++) { for (int i = 0; i < exr_header.tile_size_x; i++) { const int ii = exr_image.tiles[it].offset_x * exr_header.tile_size_x + i; const int jj = exr_image.tiles[it].offset_y * exr_header.tile_size_y + j; const int idx = ii + jj * exr_image.width; // out of region check. if (ii >= exr_image.width) { continue; } if (jj >= exr_image.height) { continue; } const int srcIdx = i + j * exr_header.tile_size_x; unsigned char **src = exr_image.tiles[it].images; (*out_rgba)[4 * idx + 0] = reinterpret_cast<float **>(src)[chIdx][srcIdx]; (*out_rgba)[4 * idx + 1] = reinterpret_cast<float **>(src)[chIdx][srcIdx]; (*out_rgba)[4 * idx + 2] = reinterpret_cast<float **>(src)[chIdx][srcIdx]; (*out_rgba)[4 * idx + 3] = reinterpret_cast<float **>(src)[chIdx][srcIdx]; } } } } else { for (int i = 0; i < exr_image.width * exr_image.height; i++) { const float val = reinterpret_cast<float **>(exr_image.images)[chIdx][i]; (*out_rgba)[4 * i + 0] = val; (*out_rgba)[4 * i + 1] = val; (*out_rgba)[4 * i + 2] = val; (*out_rgba)[4 * i + 3] = val; } } } else { // Assume RGB(A) if (idxR == -1) { tinyexr::SetErrorMessage("R channel not found", err); FreeEXRHeader(&exr_header); FreeEXRImage(&exr_image); return TINYEXR_ERROR_INVALID_DATA; } if (idxG == -1) { tinyexr::SetErrorMessage("G channel not found", err); FreeEXRHeader(&exr_header); FreeEXRImage(&exr_image); return TINYEXR_ERROR_INVALID_DATA; } if (idxB == -1) { tinyexr::SetErrorMessage("B channel not found", err); FreeEXRHeader(&exr_header); FreeEXRImage(&exr_image); return TINYEXR_ERROR_INVALID_DATA; } (*out_rgba) = reinterpret_cast<float *>( malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) * static_cast<size_t>(exr_image.height))); if (exr_header.tiled) { for (int it = 0; it < exr_image.num_tiles; it++) { for (int j = 0; j < exr_header.tile_size_y; j++) { for (int i = 0; i < exr_header.tile_size_x; i++) { const int ii = exr_image.tiles[it].offset_x * exr_header.tile_size_x + i; const int jj = exr_image.tiles[it].offset_y * exr_header.tile_size_y + j; const int idx = ii + jj * exr_image.width; // out of region check. 
if (ii >= exr_image.width) { continue; } if (jj >= exr_image.height) { continue; } const int srcIdx = i + j * exr_header.tile_size_x; unsigned char **src = exr_image.tiles[it].images; (*out_rgba)[4 * idx + 0] = reinterpret_cast<float **>(src)[idxR][srcIdx]; (*out_rgba)[4 * idx + 1] = reinterpret_cast<float **>(src)[idxG][srcIdx]; (*out_rgba)[4 * idx + 2] = reinterpret_cast<float **>(src)[idxB][srcIdx]; if (idxA != -1) { (*out_rgba)[4 * idx + 3] = reinterpret_cast<float **>(src)[idxA][srcIdx]; } else { (*out_rgba)[4 * idx + 3] = 1.0; } } } } } else { for (int i = 0; i < exr_image.width * exr_image.height; i++) { (*out_rgba)[4 * i + 0] = reinterpret_cast<float **>(exr_image.images)[idxR][i]; (*out_rgba)[4 * i + 1] = reinterpret_cast<float **>(exr_image.images)[idxG][i]; (*out_rgba)[4 * i + 2] = reinterpret_cast<float **>(exr_image.images)[idxB][i]; if (idxA != -1) { (*out_rgba)[4 * i + 3] = reinterpret_cast<float **>(exr_image.images)[idxA][i]; } else { (*out_rgba)[4 * i + 3] = 1.0; } } } } (*width) = exr_image.width; (*height) = exr_image.height; FreeEXRHeader(&exr_header); FreeEXRImage(&exr_image); return TINYEXR_SUCCESS; } int IsEXR(const char *filename) { EXRVersion exr_version; int ret = ParseEXRVersionFromFile(&exr_version, filename); if (ret != TINYEXR_SUCCESS) { return ret; } return TINYEXR_SUCCESS; } int ParseEXRHeaderFromMemory(EXRHeader *exr_header, const EXRVersion *version, const unsigned char *memory, size_t size, const char **err) { if (memory == NULL || exr_header == NULL) { tinyexr::SetErrorMessage( "Invalid argument. `memory` or `exr_header` argument is null in " "ParseEXRHeaderFromMemory()", err); // Invalid argument return TINYEXR_ERROR_INVALID_ARGUMENT; } if (size < tinyexr::kEXRVersionSize) { tinyexr::SetErrorMessage("Insufficient header/data size.\n", err); return TINYEXR_ERROR_INVALID_DATA; } const unsigned char *marker = memory + tinyexr::kEXRVersionSize; size_t marker_size = size - tinyexr::kEXRVersionSize; tinyexr::HeaderInfo info; info.clear(); std::string err_str; int ret = ParseEXRHeader(&info, NULL, version, &err_str, marker, marker_size); if (ret != TINYEXR_SUCCESS) { if (err && !err_str.empty()) { tinyexr::SetErrorMessage(err_str, err); } } ConvertHeader(exr_header, info); // transfoer `tiled` from version. exr_header->tiled = version->tiled; return ret; } int LoadEXRFromMemory(float **out_rgba, int *width, int *height, const unsigned char *memory, size_t size, const char **err) { if (out_rgba == NULL || memory == NULL) { tinyexr::SetErrorMessage("Invalid argument for LoadEXRFromMemory", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } EXRVersion exr_version; EXRImage exr_image; EXRHeader exr_header; InitEXRHeader(&exr_header); int ret = ParseEXRVersionFromMemory(&exr_version, memory, size); if (ret != TINYEXR_SUCCESS) { std::stringstream ss; ss << "Failed to parse EXR version. code(" << ret << ")"; tinyexr::SetErrorMessage(ss.str(), err); return ret; } ret = ParseEXRHeaderFromMemory(&exr_header, &exr_version, memory, size, err); if (ret != TINYEXR_SUCCESS) { return ret; } // Read HALF channel as FLOAT. 
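  // `pixel_types` describes what is stored in the file; `requested_pixel_types`
  // tells the decoder what to produce. Requesting FLOAT here makes every
  // channel come back as 32-bit float, which is what the float* RGBA output
  // of this function expects.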
for (int i = 0; i < exr_header.num_channels; i++) { if (exr_header.pixel_types[i] == TINYEXR_PIXELTYPE_HALF) { exr_header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT; } } InitEXRImage(&exr_image); ret = LoadEXRImageFromMemory(&exr_image, &exr_header, memory, size, err); if (ret != TINYEXR_SUCCESS) { return ret; } // RGBA int idxR = -1; int idxG = -1; int idxB = -1; int idxA = -1; for (int c = 0; c < exr_header.num_channels; c++) { if (strcmp(exr_header.channels[c].name, "R") == 0) { idxR = c; } else if (strcmp(exr_header.channels[c].name, "G") == 0) { idxG = c; } else if (strcmp(exr_header.channels[c].name, "B") == 0) { idxB = c; } else if (strcmp(exr_header.channels[c].name, "A") == 0) { idxA = c; } } // TODO(syoyo): Refactor removing same code as used in LoadEXR(). if (exr_header.num_channels == 1) { // Grayscale channel only. (*out_rgba) = reinterpret_cast<float *>( malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) * static_cast<size_t>(exr_image.height))); if (exr_header.tiled) { for (int it = 0; it < exr_image.num_tiles; it++) { for (int j = 0; j < exr_header.tile_size_y; j++) { for (int i = 0; i < exr_header.tile_size_x; i++) { const int ii = exr_image.tiles[it].offset_x * exr_header.tile_size_x + i; const int jj = exr_image.tiles[it].offset_y * exr_header.tile_size_y + j; const int idx = ii + jj * exr_image.width; // out of region check. if (ii >= exr_image.width) { continue; } if (jj >= exr_image.height) { continue; } const int srcIdx = i + j * exr_header.tile_size_x; unsigned char **src = exr_image.tiles[it].images; (*out_rgba)[4 * idx + 0] = reinterpret_cast<float **>(src)[0][srcIdx]; (*out_rgba)[4 * idx + 1] = reinterpret_cast<float **>(src)[0][srcIdx]; (*out_rgba)[4 * idx + 2] = reinterpret_cast<float **>(src)[0][srcIdx]; (*out_rgba)[4 * idx + 3] = reinterpret_cast<float **>(src)[0][srcIdx]; } } } } else { for (int i = 0; i < exr_image.width * exr_image.height; i++) { const float val = reinterpret_cast<float **>(exr_image.images)[0][i]; (*out_rgba)[4 * i + 0] = val; (*out_rgba)[4 * i + 1] = val; (*out_rgba)[4 * i + 2] = val; (*out_rgba)[4 * i + 3] = val; } } } else { // TODO(syoyo): Support non RGBA image. if (idxR == -1) { tinyexr::SetErrorMessage("R channel not found", err); // @todo { free exr_image } return TINYEXR_ERROR_INVALID_DATA; } if (idxG == -1) { tinyexr::SetErrorMessage("G channel not found", err); // @todo { free exr_image } return TINYEXR_ERROR_INVALID_DATA; } if (idxB == -1) { tinyexr::SetErrorMessage("B channel not found", err); // @todo { free exr_image } return TINYEXR_ERROR_INVALID_DATA; } (*out_rgba) = reinterpret_cast<float *>( malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) * static_cast<size_t>(exr_image.height))); if (exr_header.tiled) { for (int it = 0; it < exr_image.num_tiles; it++) { for (int j = 0; j < exr_header.tile_size_y; j++) for (int i = 0; i < exr_header.tile_size_x; i++) { const int ii = exr_image.tiles[it].offset_x * exr_header.tile_size_x + i; const int jj = exr_image.tiles[it].offset_y * exr_header.tile_size_y + j; const int idx = ii + jj * exr_image.width; // out of region check. 
if (ii >= exr_image.width) { continue; } if (jj >= exr_image.height) { continue; } const int srcIdx = i + j * exr_header.tile_size_x; unsigned char **src = exr_image.tiles[it].images; (*out_rgba)[4 * idx + 0] = reinterpret_cast<float **>(src)[idxR][srcIdx]; (*out_rgba)[4 * idx + 1] = reinterpret_cast<float **>(src)[idxG][srcIdx]; (*out_rgba)[4 * idx + 2] = reinterpret_cast<float **>(src)[idxB][srcIdx]; if (idxA != -1) { (*out_rgba)[4 * idx + 3] = reinterpret_cast<float **>(src)[idxA][srcIdx]; } else { (*out_rgba)[4 * idx + 3] = 1.0; } } } } else { for (int i = 0; i < exr_image.width * exr_image.height; i++) { (*out_rgba)[4 * i + 0] = reinterpret_cast<float **>(exr_image.images)[idxR][i]; (*out_rgba)[4 * i + 1] = reinterpret_cast<float **>(exr_image.images)[idxG][i]; (*out_rgba)[4 * i + 2] = reinterpret_cast<float **>(exr_image.images)[idxB][i]; if (idxA != -1) { (*out_rgba)[4 * i + 3] = reinterpret_cast<float **>(exr_image.images)[idxA][i]; } else { (*out_rgba)[4 * i + 3] = 1.0; } } } } (*width) = exr_image.width; (*height) = exr_image.height; FreeEXRHeader(&exr_header); FreeEXRImage(&exr_image); return TINYEXR_SUCCESS; } int LoadEXRImageFromFile(EXRImage *exr_image, const EXRHeader *exr_header, const char *filename, const char **err) { if (exr_image == NULL) { tinyexr::SetErrorMessage("Invalid argument for LoadEXRImageFromFile", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } #ifdef _WIN32 FILE *fp = NULL; fopen_s(&fp, filename, "rb"); #else FILE *fp = fopen(filename, "rb"); #endif if (!fp) { tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err); return TINYEXR_ERROR_CANT_OPEN_FILE; } size_t filesize; // Compute size fseek(fp, 0, SEEK_END); filesize = static_cast<size_t>(ftell(fp)); fseek(fp, 0, SEEK_SET); if (filesize < 16) { tinyexr::SetErrorMessage("File size too short " + std::string(filename), err); return TINYEXR_ERROR_INVALID_FILE; } std::vector<unsigned char> buf(filesize); // @todo { use mmap } { size_t ret; ret = fread(&buf[0], 1, filesize, fp); assert(ret == filesize); fclose(fp); (void)ret; } return LoadEXRImageFromMemory(exr_image, exr_header, &buf.at(0), filesize, err); } int LoadEXRImageFromMemory(EXRImage *exr_image, const EXRHeader *exr_header, const unsigned char *memory, const size_t size, const char **err) { if (exr_image == NULL || memory == NULL || (size < tinyexr::kEXRVersionSize)) { tinyexr::SetErrorMessage("Invalid argument for LoadEXRImageFromMemory", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } if (exr_header->header_len == 0) { tinyexr::SetErrorMessage("EXRHeader variable is not initialized.", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } const unsigned char *head = memory; const unsigned char *marker = reinterpret_cast<const unsigned char *>( memory + exr_header->header_len + 8); // +8 for magic number + version header. 
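  // `header_len` was recorded when the header was parsed, so `marker` now
  // points at the first byte of the chunk offset table.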
return tinyexr::DecodeEXRImage(exr_image, exr_header, head, marker, size, err); } size_t SaveEXRImageToMemory(const EXRImage *exr_image, const EXRHeader *exr_header, unsigned char **memory_out, const char **err) { if (exr_image == NULL || memory_out == NULL || exr_header->compression_type < 0) { tinyexr::SetErrorMessage("Invalid argument for SaveEXRImageToMemory", err); return 0; } #if !TINYEXR_USE_PIZ if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { tinyexr::SetErrorMessage("PIZ compression is not supported in this build", err); return 0; } #endif #if !TINYEXR_USE_ZFP if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { tinyexr::SetErrorMessage("ZFP compression is not supported in this build", err); return 0; } #endif #if TINYEXR_USE_ZFP for (size_t i = 0; i < static_cast<size_t>(exr_header->num_channels); i++) { if (exr_header->requested_pixel_types[i] != TINYEXR_PIXELTYPE_FLOAT) { tinyexr::SetErrorMessage("Pixel type must be FLOAT for ZFP compression", err); return 0; } } #endif std::vector<unsigned char> memory; // Header { const char header[] = {0x76, 0x2f, 0x31, 0x01}; memory.insert(memory.end(), header, header + 4); } // Version, scanline. { char marker[] = {2, 0, 0, 0}; /* @todo if (exr_header->tiled) { marker[1] |= 0x2; } if (exr_header->long_name) { marker[1] |= 0x4; } if (exr_header->non_image) { marker[1] |= 0x8; } if (exr_header->multipart) { marker[1] |= 0x10; } */ memory.insert(memory.end(), marker, marker + 4); } int num_scanlines = 1; if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) { num_scanlines = 16; } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { num_scanlines = 32; } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { num_scanlines = 16; } // Write attributes. 
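  // Each attribute is serialized as: name '\0' type '\0' int(size) data.
  // The required header attributes below (channels, compression, dataWindow,
  // displayWindow, lineOrder, pixelAspectRatio, screenWindowCenter,
  // screenWindowWidth) are all written in this format via
  // WriteAttributeToMemory.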
std::vector<tinyexr::ChannelInfo> channels; { std::vector<unsigned char> data; for (int c = 0; c < exr_header->num_channels; c++) { tinyexr::ChannelInfo info; info.p_linear = 0; info.pixel_type = exr_header->requested_pixel_types[c]; info.x_sampling = 1; info.y_sampling = 1; info.name = std::string(exr_header->channels[c].name); channels.push_back(info); } tinyexr::WriteChannelInfo(data, channels); tinyexr::WriteAttributeToMemory(&memory, "channels", "chlist", &data.at(0), static_cast<int>(data.size())); } { int comp = exr_header->compression_type; tinyexr::swap4(reinterpret_cast<unsigned int *>(&comp)); tinyexr::WriteAttributeToMemory( &memory, "compression", "compression", reinterpret_cast<const unsigned char *>(&comp), 1); } { int data[4] = {0, 0, exr_image->width - 1, exr_image->height - 1}; tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[0])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[1])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[2])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[3])); tinyexr::WriteAttributeToMemory( &memory, "dataWindow", "box2i", reinterpret_cast<const unsigned char *>(data), sizeof(int) * 4); tinyexr::WriteAttributeToMemory( &memory, "displayWindow", "box2i", reinterpret_cast<const unsigned char *>(data), sizeof(int) * 4); } { unsigned char line_order = 0; // @fixme { read line_order from EXRHeader } tinyexr::WriteAttributeToMemory(&memory, "lineOrder", "lineOrder", &line_order, 1); } { float aspectRatio = 1.0f; tinyexr::swap4(reinterpret_cast<unsigned int *>(&aspectRatio)); tinyexr::WriteAttributeToMemory( &memory, "pixelAspectRatio", "float", reinterpret_cast<const unsigned char *>(&aspectRatio), sizeof(float)); } { float center[2] = {0.0f, 0.0f}; tinyexr::swap4(reinterpret_cast<unsigned int *>(&center[0])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&center[1])); tinyexr::WriteAttributeToMemory( &memory, "screenWindowCenter", "v2f", reinterpret_cast<const unsigned char *>(center), 2 * sizeof(float)); } { float w = static_cast<float>(exr_image->width); tinyexr::swap4(reinterpret_cast<unsigned int *>(&w)); tinyexr::WriteAttributeToMemory(&memory, "screenWindowWidth", "float", reinterpret_cast<const unsigned char *>(&w), sizeof(float)); } // Custom attributes if (exr_header->num_custom_attributes > 0) { for (int i = 0; i < exr_header->num_custom_attributes; i++) { tinyexr::WriteAttributeToMemory( &memory, exr_header->custom_attributes[i].name, exr_header->custom_attributes[i].type, reinterpret_cast<const unsigned char *>( exr_header->custom_attributes[i].value), exr_header->custom_attributes[i].size); } } { // end of header unsigned char e = 0; memory.push_back(e); } int num_blocks = exr_image->height / num_scanlines; if (num_blocks * num_scanlines < exr_image->height) { num_blocks++; } std::vector<tinyexr::tinyexr_uint64> offsets(static_cast<size_t>(num_blocks)); size_t headerSize = memory.size(); tinyexr::tinyexr_uint64 offset = headerSize + static_cast<size_t>(num_blocks) * sizeof( tinyexr::tinyexr_int64); // sizeof(header) + sizeof(offsetTable) std::vector<std::vector<unsigned char> > data_list( static_cast<size_t>(num_blocks)); std::vector<size_t> channel_offset_list( static_cast<size_t>(exr_header->num_channels)); int pixel_data_size = 0; size_t channel_offset = 0; for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) { channel_offset_list[c] = channel_offset; if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { pixel_data_size += sizeof(unsigned short); channel_offset += 
sizeof(unsigned short); } else if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) { pixel_data_size += sizeof(float); channel_offset += sizeof(float); } else if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT) { pixel_data_size += sizeof(unsigned int); channel_offset += sizeof(unsigned int); } else { assert(0); } } #if TINYEXR_USE_ZFP tinyexr::ZFPCompressionParam zfp_compression_param; // Use ZFP compression parameter from custom attributes(if such a parameter // exists) { bool ret = tinyexr::FindZFPCompressionParam( &zfp_compression_param, exr_header->custom_attributes, exr_header->num_custom_attributes); if (!ret) { // Use predefined compression parameter. zfp_compression_param.type = 0; zfp_compression_param.rate = 2; } } #endif // TOOD(LTE): C++11 thread // Use signed int since some OpenMP compiler doesn't allow unsigned type for // `parallel for` #if TINYEXR_USE_OPENMP #pragma omp parallel for #endif for (int i = 0; i < num_blocks; i++) { size_t ii = static_cast<size_t>(i); int start_y = num_scanlines * i; int endY = (std::min)(num_scanlines * (i + 1), exr_image->height); int h = endY - start_y; std::vector<unsigned char> buf( static_cast<size_t>(exr_image->width * h * pixel_data_size)); for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) { if (exr_header->pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) { for (int y = 0; y < h; y++) { // Assume increasing Y float *line_ptr = reinterpret_cast<float *>(&buf.at( static_cast<size_t>(pixel_data_size * y * exr_image->width) + channel_offset_list[c] * static_cast<size_t>(exr_image->width))); for (int x = 0; x < exr_image->width; x++) { tinyexr::FP16 h16; h16.u = reinterpret_cast<unsigned short **>( exr_image->images)[c][(y + start_y) * exr_image->width + x]; tinyexr::FP32 f32 = half_to_float(h16); tinyexr::swap4(reinterpret_cast<unsigned int *>(&f32.f)); // line_ptr[x] = f32.f; tinyexr::cpy4(line_ptr + x, &(f32.f)); } } } else if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { for (int y = 0; y < h; y++) { // Assume increasing Y unsigned short *line_ptr = reinterpret_cast<unsigned short *>( &buf.at(static_cast<size_t>(pixel_data_size * y * exr_image->width) + channel_offset_list[c] * static_cast<size_t>(exr_image->width))); for (int x = 0; x < exr_image->width; x++) { unsigned short val = reinterpret_cast<unsigned short **>( exr_image->images)[c][(y + start_y) * exr_image->width + x]; tinyexr::swap2(&val); // line_ptr[x] = val; tinyexr::cpy2(line_ptr + x, &val); } } } else { assert(0); } } else if (exr_header->pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) { if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { for (int y = 0; y < h; y++) { // Assume increasing Y unsigned short *line_ptr = reinterpret_cast<unsigned short *>( &buf.at(static_cast<size_t>(pixel_data_size * y * exr_image->width) + channel_offset_list[c] * static_cast<size_t>(exr_image->width))); for (int x = 0; x < exr_image->width; x++) { tinyexr::FP32 f32; f32.f = reinterpret_cast<float **>( exr_image->images)[c][(y + start_y) * exr_image->width + x]; tinyexr::FP16 h16; h16 = float_to_half_full(f32); tinyexr::swap2(reinterpret_cast<unsigned short *>(&h16.u)); // line_ptr[x] = h16.u; tinyexr::cpy2(line_ptr + x, &(h16.u)); } } } else if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) { for (int y = 0; y < h; y++) { // Assume increasing Y float *line_ptr = reinterpret_cast<float *>(&buf.at( 
static_cast<size_t>(pixel_data_size * y * exr_image->width) + channel_offset_list[c] * static_cast<size_t>(exr_image->width))); for (int x = 0; x < exr_image->width; x++) { float val = reinterpret_cast<float **>( exr_image->images)[c][(y + start_y) * exr_image->width + x]; tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); // line_ptr[x] = val; tinyexr::cpy4(line_ptr + x, &val); } } } else { assert(0); } } else if (exr_header->pixel_types[c] == TINYEXR_PIXELTYPE_UINT) { for (int y = 0; y < h; y++) { // Assume increasing Y unsigned int *line_ptr = reinterpret_cast<unsigned int *>(&buf.at( static_cast<size_t>(pixel_data_size * y * exr_image->width) + channel_offset_list[c] * static_cast<size_t>(exr_image->width))); for (int x = 0; x < exr_image->width; x++) { unsigned int val = reinterpret_cast<unsigned int **>( exr_image->images)[c][(y + start_y) * exr_image->width + x]; tinyexr::swap4(&val); // line_ptr[x] = val; tinyexr::cpy4(line_ptr + x, &val); } } } } if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_NONE) { // 4 byte: scan line // 4 byte: data size // ~ : pixel data(uncompressed) std::vector<unsigned char> header(8); unsigned int data_len = static_cast<unsigned int>(buf.size()); memcpy(&header.at(0), &start_y, sizeof(int)); memcpy(&header.at(4), &data_len, sizeof(unsigned int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0))); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4))); data_list[ii].insert(data_list[ii].end(), header.begin(), header.end()); data_list[ii].insert(data_list[ii].end(), buf.begin(), buf.begin() + data_len); } else if ((exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) || (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP)) { #if TINYEXR_USE_MINIZ std::vector<unsigned char> block(tinyexr::miniz::mz_compressBound( static_cast<unsigned long>(buf.size()))); #else std::vector<unsigned char> block( compressBound(static_cast<uLong>(buf.size()))); #endif tinyexr::tinyexr_uint64 outSize = block.size(); tinyexr::CompressZip(&block.at(0), outSize, reinterpret_cast<const unsigned char *>(&buf.at(0)), static_cast<unsigned long>(buf.size())); // 4 byte: scan line // 4 byte: data size // ~ : pixel data(compressed) std::vector<unsigned char> header(8); unsigned int data_len = static_cast<unsigned int>(outSize); // truncate memcpy(&header.at(0), &start_y, sizeof(int)); memcpy(&header.at(4), &data_len, sizeof(unsigned int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0))); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4))); data_list[ii].insert(data_list[ii].end(), header.begin(), header.end()); data_list[ii].insert(data_list[ii].end(), block.begin(), block.begin() + data_len); } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_RLE) { // (buf.size() * 3) / 2 would be enough. 
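      // RLE output can exceed the input size when the data does not
      // compress; 3/2 of the input size is used here as a safe upper bound
      // for the scratch buffer, matching the comment above.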
std::vector<unsigned char> block((buf.size() * 3) / 2); tinyexr::tinyexr_uint64 outSize = block.size(); tinyexr::CompressRle(&block.at(0), outSize, reinterpret_cast<const unsigned char *>(&buf.at(0)), static_cast<unsigned long>(buf.size())); // 4 byte: scan line // 4 byte: data size // ~ : pixel data(compressed) std::vector<unsigned char> header(8); unsigned int data_len = static_cast<unsigned int>(outSize); // truncate memcpy(&header.at(0), &start_y, sizeof(int)); memcpy(&header.at(4), &data_len, sizeof(unsigned int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0))); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4))); data_list[ii].insert(data_list[ii].end(), header.begin(), header.end()); data_list[ii].insert(data_list[ii].end(), block.begin(), block.begin() + data_len); } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { #if TINYEXR_USE_PIZ unsigned int bufLen = 8192 + static_cast<unsigned int>( 2 * static_cast<unsigned int>( buf.size())); // @fixme { compute good bound. } std::vector<unsigned char> block(bufLen); unsigned int outSize = static_cast<unsigned int>(block.size()); CompressPiz(&block.at(0), &outSize, reinterpret_cast<const unsigned char *>(&buf.at(0)), buf.size(), channels, exr_image->width, h); // 4 byte: scan line // 4 byte: data size // ~ : pixel data(compressed) std::vector<unsigned char> header(8); unsigned int data_len = outSize; memcpy(&header.at(0), &start_y, sizeof(int)); memcpy(&header.at(4), &data_len, sizeof(unsigned int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0))); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4))); data_list[ii].insert(data_list[ii].end(), header.begin(), header.end()); data_list[ii].insert(data_list[ii].end(), block.begin(), block.begin() + data_len); #else assert(0); #endif } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { #if TINYEXR_USE_ZFP std::vector<unsigned char> block; unsigned int outSize; tinyexr::CompressZfp( &block, &outSize, reinterpret_cast<const float *>(&buf.at(0)), exr_image->width, h, exr_header->num_channels, zfp_compression_param); // 4 byte: scan line // 4 byte: data size // ~ : pixel data(compressed) std::vector<unsigned char> header(8); unsigned int data_len = outSize; memcpy(&header.at(0), &start_y, sizeof(int)); memcpy(&header.at(4), &data_len, sizeof(unsigned int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0))); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4))); data_list[ii].insert(data_list[ii].end(), header.begin(), header.end()); data_list[ii].insert(data_list[ii].end(), block.begin(), block.begin() + data_len); #else assert(0); #endif } else { assert(0); } } // omp parallel for (size_t i = 0; i < static_cast<size_t>(num_blocks); i++) { offsets[i] = offset; tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64 *>(&offsets[i])); offset += data_list[i].size(); } size_t totalSize = static_cast<size_t>(offset); { memory.insert( memory.end(), reinterpret_cast<unsigned char *>(&offsets.at(0)), reinterpret_cast<unsigned char *>(&offsets.at(0)) + sizeof(tinyexr::tinyexr_uint64) * static_cast<size_t>(num_blocks)); } if (memory.size() == 0) { tinyexr::SetErrorMessage("Output memory size is zero", err); return 0; } (*memory_out) = static_cast<unsigned char *>(malloc(totalSize)); memcpy((*memory_out), &memory.at(0), memory.size()); unsigned char *memory_ptr = *memory_out + memory.size(); for (size_t i = 0; i < static_cast<size_t>(num_blocks); i++) { memcpy(memory_ptr, 
&data_list[i].at(0), data_list[i].size()); memory_ptr += data_list[i].size(); } return totalSize; // OK } int SaveEXRImageToFile(const EXRImage *exr_image, const EXRHeader *exr_header, const char *filename, const char **err) { if (exr_image == NULL || filename == NULL || exr_header->compression_type < 0) { tinyexr::SetErrorMessage("Invalid argument for SaveEXRImageToFile", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } #if !TINYEXR_USE_PIZ if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { tinyexr::SetErrorMessage("PIZ compression is not supported in this build", err); return TINYEXR_ERROR_UNSUPPORTED_FEATURE; } #endif #if !TINYEXR_USE_ZFP if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { tinyexr::SetErrorMessage("ZFP compression is not supported in this build", err); return TINYEXR_ERROR_UNSUPPORTED_FEATURE; } #endif #ifdef _WIN32 FILE *fp = NULL; fopen_s(&fp, filename, "wb"); #else FILE *fp = fopen(filename, "wb"); #endif if (!fp) { tinyexr::SetErrorMessage("Cannot write a file", err); return TINYEXR_ERROR_CANT_WRITE_FILE; } unsigned char *mem = NULL; size_t mem_size = SaveEXRImageToMemory(exr_image, exr_header, &mem, err); if (mem_size == 0) { return TINYEXR_ERROR_SERIALZATION_FAILED; } size_t written_size = 0; if ((mem_size > 0) && mem) { written_size = fwrite(mem, 1, mem_size, fp); } free(mem); fclose(fp); if (written_size != mem_size) { tinyexr::SetErrorMessage("Cannot write a file", err); return TINYEXR_ERROR_CANT_WRITE_FILE; } return TINYEXR_SUCCESS; } int LoadDeepEXR(DeepImage *deep_image, const char *filename, const char **err) { if (deep_image == NULL) { tinyexr::SetErrorMessage("Invalid argument for LoadDeepEXR", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } #ifdef _MSC_VER FILE *fp = NULL; errno_t errcode = fopen_s(&fp, filename, "rb"); if ((0 != errcode) || (!fp)) { tinyexr::SetErrorMessage("Cannot read a file " + std::string(filename), err); return TINYEXR_ERROR_CANT_OPEN_FILE; } #else FILE *fp = fopen(filename, "rb"); if (!fp) { tinyexr::SetErrorMessage("Cannot read a file " + std::string(filename), err); return TINYEXR_ERROR_CANT_OPEN_FILE; } #endif size_t filesize; // Compute size fseek(fp, 0, SEEK_END); filesize = static_cast<size_t>(ftell(fp)); fseek(fp, 0, SEEK_SET); if (filesize == 0) { fclose(fp); tinyexr::SetErrorMessage("File size is zero : " + std::string(filename), err); return TINYEXR_ERROR_INVALID_FILE; } std::vector<char> buf(filesize); // @todo { use mmap } { size_t ret; ret = fread(&buf[0], 1, filesize, fp); assert(ret == filesize); (void)ret; } fclose(fp); const char *head = &buf[0]; const char *marker = &buf[0]; // Header check. { const char header[] = {0x76, 0x2f, 0x31, 0x01}; if (memcmp(marker, header, 4) != 0) { tinyexr::SetErrorMessage("Invalid magic number", err); return TINYEXR_ERROR_INVALID_MAGIC_NUMBER; } marker += 4; } // Version, scanline. { // ver 2.0, scanline, deep bit on(0x800) // must be [2, 0, 0, 0] if (marker[0] != 2 || marker[1] != 8 || marker[2] != 0 || marker[3] != 0) { tinyexr::SetErrorMessage("Unsupported version or scanline", err); return TINYEXR_ERROR_UNSUPPORTED_FORMAT; } marker += 4; } int dx = -1; int dy = -1; int dw = -1; int dh = -1; int num_scanline_blocks = 1; // 16 for ZIP compression. 
int compression_type = -1; int num_channels = -1; std::vector<tinyexr::ChannelInfo> channels; // Read attributes size_t size = filesize - tinyexr::kEXRVersionSize; for (;;) { if (0 == size) { return TINYEXR_ERROR_INVALID_DATA; } else if (marker[0] == '\0') { marker++; size--; break; } std::string attr_name; std::string attr_type; std::vector<unsigned char> data; size_t marker_size; if (!tinyexr::ReadAttribute(&attr_name, &attr_type, &data, &marker_size, marker, size)) { std::stringstream ss; ss << "Failed to parse attribute\n"; tinyexr::SetErrorMessage(ss.str(), err); return TINYEXR_ERROR_INVALID_DATA; } marker += marker_size; size -= marker_size; if (attr_name.compare("compression") == 0) { compression_type = data[0]; if (compression_type > TINYEXR_COMPRESSIONTYPE_PIZ) { std::stringstream ss; ss << "Unsupported compression type : " << compression_type; tinyexr::SetErrorMessage(ss.str(), err); return TINYEXR_ERROR_UNSUPPORTED_FORMAT; } if (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) { num_scanline_blocks = 16; } } else if (attr_name.compare("channels") == 0) { // name: zero-terminated string, from 1 to 255 bytes long // pixel type: int, possible values are: UINT = 0 HALF = 1 FLOAT = 2 // pLinear: unsigned char, possible values are 0 and 1 // reserved: three chars, should be zero // xSampling: int // ySampling: int if (!tinyexr::ReadChannelInfo(channels, data)) { tinyexr::SetErrorMessage("Failed to parse channel info", err); return TINYEXR_ERROR_INVALID_DATA; } num_channels = static_cast<int>(channels.size()); if (num_channels < 1) { tinyexr::SetErrorMessage("Invalid channels format", err); return TINYEXR_ERROR_INVALID_DATA; } } else if (attr_name.compare("dataWindow") == 0) { memcpy(&dx, &data.at(0), sizeof(int)); memcpy(&dy, &data.at(4), sizeof(int)); memcpy(&dw, &data.at(8), sizeof(int)); memcpy(&dh, &data.at(12), sizeof(int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&dx)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&dy)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&dw)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&dh)); } else if (attr_name.compare("displayWindow") == 0) { int x; int y; int w; int h; memcpy(&x, &data.at(0), sizeof(int)); memcpy(&y, &data.at(4), sizeof(int)); memcpy(&w, &data.at(8), sizeof(int)); memcpy(&h, &data.at(12), sizeof(int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&x)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&y)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&w)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&h)); } } assert(dx >= 0); assert(dy >= 0); assert(dw >= 0); assert(dh >= 0); assert(num_channels >= 1); int data_width = dw - dx + 1; int data_height = dh - dy + 1; std::vector<float> image( static_cast<size_t>(data_width * data_height * 4)); // 4 = RGBA // Read offset tables. 
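  // One uint64 offset per chunk; as with regular scanline images the chunk
  // count is data_height / num_scanline_blocks, rounded up.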
int num_blocks = data_height / num_scanline_blocks; if (num_blocks * num_scanline_blocks < data_height) { num_blocks++; } std::vector<tinyexr::tinyexr_int64> offsets(static_cast<size_t>(num_blocks)); for (size_t y = 0; y < static_cast<size_t>(num_blocks); y++) { tinyexr::tinyexr_int64 offset; memcpy(&offset, marker, sizeof(tinyexr::tinyexr_int64)); tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64 *>(&offset)); marker += sizeof(tinyexr::tinyexr_int64); // = 8 offsets[y] = offset; } #if TINYEXR_USE_PIZ if ((compression_type == TINYEXR_COMPRESSIONTYPE_NONE) || (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) || (compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) || (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) || (compression_type == TINYEXR_COMPRESSIONTYPE_PIZ)) { #else if ((compression_type == TINYEXR_COMPRESSIONTYPE_NONE) || (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) || (compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) || (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP)) { #endif // OK } else { tinyexr::SetErrorMessage("Unsupported compression format", err); return TINYEXR_ERROR_UNSUPPORTED_FORMAT; } deep_image->image = static_cast<float ***>( malloc(sizeof(float **) * static_cast<size_t>(num_channels))); for (int c = 0; c < num_channels; c++) { deep_image->image[c] = static_cast<float **>( malloc(sizeof(float *) * static_cast<size_t>(data_height))); for (int y = 0; y < data_height; y++) { } } deep_image->offset_table = static_cast<int **>( malloc(sizeof(int *) * static_cast<size_t>(data_height))); for (int y = 0; y < data_height; y++) { deep_image->offset_table[y] = static_cast<int *>( malloc(sizeof(int) * static_cast<size_t>(data_width))); } for (size_t y = 0; y < static_cast<size_t>(num_blocks); y++) { const unsigned char *data_ptr = reinterpret_cast<const unsigned char *>(head + offsets[y]); // int: y coordinate // int64: packed size of pixel offset table // int64: packed size of sample data // int64: unpacked size of sample data // compressed pixel offset table // compressed sample data int line_no; tinyexr::tinyexr_int64 packedOffsetTableSize; tinyexr::tinyexr_int64 packedSampleDataSize; tinyexr::tinyexr_int64 unpackedSampleDataSize; memcpy(&line_no, data_ptr, sizeof(int)); memcpy(&packedOffsetTableSize, data_ptr + 4, sizeof(tinyexr::tinyexr_int64)); memcpy(&packedSampleDataSize, data_ptr + 12, sizeof(tinyexr::tinyexr_int64)); memcpy(&unpackedSampleDataSize, data_ptr + 20, sizeof(tinyexr::tinyexr_int64)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&line_no)); tinyexr::swap8( reinterpret_cast<tinyexr::tinyexr_uint64 *>(&packedOffsetTableSize)); tinyexr::swap8( reinterpret_cast<tinyexr::tinyexr_uint64 *>(&packedSampleDataSize)); tinyexr::swap8( reinterpret_cast<tinyexr::tinyexr_uint64 *>(&unpackedSampleDataSize)); std::vector<int> pixelOffsetTable(static_cast<size_t>(data_width)); // decode pixel offset table. { unsigned long dstLen = static_cast<unsigned long>(pixelOffsetTable.size() * sizeof(int)); if (!tinyexr::DecompressZip( reinterpret_cast<unsigned char *>(&pixelOffsetTable.at(0)), &dstLen, data_ptr + 28, static_cast<unsigned long>(packedOffsetTableSize))) { return false; } assert(dstLen == pixelOffsetTable.size() * sizeof(int)); for (size_t i = 0; i < static_cast<size_t>(data_width); i++) { deep_image->offset_table[y][i] = pixelOffsetTable[i]; } } std::vector<unsigned char> sample_data( static_cast<size_t>(unpackedSampleDataSize)); // decode sample data. 
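    // @fixme { The DecompressZip failure paths in this function `return
    // false`, i.e. 0, which callers see as TINYEXR_SUCCESS; they should
    // return TINYEXR_ERROR_INVALID_DATA instead. }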
{ unsigned long dstLen = static_cast<unsigned long>(unpackedSampleDataSize); if (dstLen) { if (!tinyexr::DecompressZip( reinterpret_cast<unsigned char *>(&sample_data.at(0)), &dstLen, data_ptr + 28 + packedOffsetTableSize, static_cast<unsigned long>(packedSampleDataSize))) { return false; } assert(dstLen == static_cast<unsigned long>(unpackedSampleDataSize)); } } // decode sample int sampleSize = -1; std::vector<int> channel_offset_list(static_cast<size_t>(num_channels)); { int channel_offset = 0; for (size_t i = 0; i < static_cast<size_t>(num_channels); i++) { channel_offset_list[i] = channel_offset; if (channels[i].pixel_type == TINYEXR_PIXELTYPE_UINT) { // UINT channel_offset += 4; } else if (channels[i].pixel_type == TINYEXR_PIXELTYPE_HALF) { // half channel_offset += 2; } else if (channels[i].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { // float channel_offset += 4; } else { assert(0); } } sampleSize = channel_offset; } assert(sampleSize >= 2); assert(static_cast<size_t>( pixelOffsetTable[static_cast<size_t>(data_width - 1)] * sampleSize) == sample_data.size()); int samples_per_line = static_cast<int>(sample_data.size()) / sampleSize; // // Alloc memory // // // pixel data is stored as image[channels][pixel_samples] // { tinyexr::tinyexr_uint64 data_offset = 0; for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { deep_image->image[c][y] = static_cast<float *>( malloc(sizeof(float) * static_cast<size_t>(samples_per_line))); if (channels[c].pixel_type == 0) { // UINT for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) { unsigned int ui; unsigned int *src_ptr = reinterpret_cast<unsigned int *>( &sample_data.at(size_t(data_offset) + x * sizeof(int))); tinyexr::cpy4(&ui, src_ptr); deep_image->image[c][y][x] = static_cast<float>(ui); // @fixme } data_offset += sizeof(unsigned int) * static_cast<size_t>(samples_per_line); } else if (channels[c].pixel_type == 1) { // half for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) { tinyexr::FP16 f16; const unsigned short *src_ptr = reinterpret_cast<unsigned short *>( &sample_data.at(size_t(data_offset) + x * sizeof(short))); tinyexr::cpy2(&(f16.u), src_ptr); tinyexr::FP32 f32 = half_to_float(f16); deep_image->image[c][y][x] = f32.f; } data_offset += sizeof(short) * static_cast<size_t>(samples_per_line); } else { // float for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) { float f; const float *src_ptr = reinterpret_cast<float *>( &sample_data.at(size_t(data_offset) + x * sizeof(float))); tinyexr::cpy4(&f, src_ptr); deep_image->image[c][y][x] = f; } data_offset += sizeof(float) * static_cast<size_t>(samples_per_line); } } } } // y deep_image->width = data_width; deep_image->height = data_height; deep_image->channel_names = static_cast<const char **>( malloc(sizeof(const char *) * static_cast<size_t>(num_channels))); for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { #ifdef _WIN32 deep_image->channel_names[c] = _strdup(channels[c].name.c_str()); #else deep_image->channel_names[c] = strdup(channels[c].name.c_str()); #endif } deep_image->num_channels = num_channels; return TINYEXR_SUCCESS; } void InitEXRImage(EXRImage *exr_image) { if (exr_image == NULL) { return; } exr_image->width = 0; exr_image->height = 0; exr_image->num_channels = 0; exr_image->images = NULL; exr_image->tiles = NULL; exr_image->num_tiles = 0; } void FreeEXRErrorMessage(const char *msg) { if (msg) { free(reinterpret_cast<void *>(const_cast<char *>(msg))); } return; } void InitEXRHeader(EXRHeader *exr_header) { if 
(exr_header == NULL) { return; } memset(exr_header, 0, sizeof(EXRHeader)); } int FreeEXRHeader(EXRHeader *exr_header) { if (exr_header == NULL) { return TINYEXR_ERROR_INVALID_ARGUMENT; } if (exr_header->channels) { free(exr_header->channels); } if (exr_header->pixel_types) { free(exr_header->pixel_types); } if (exr_header->requested_pixel_types) { free(exr_header->requested_pixel_types); } for (int i = 0; i < exr_header->num_custom_attributes; i++) { if (exr_header->custom_attributes[i].value) { free(exr_header->custom_attributes[i].value); } } if (exr_header->custom_attributes) { free(exr_header->custom_attributes); } return TINYEXR_SUCCESS; } int FreeEXRImage(EXRImage *exr_image) { if (exr_image == NULL) { return TINYEXR_ERROR_INVALID_ARGUMENT; } for (int i = 0; i < exr_image->num_channels; i++) { if (exr_image->images && exr_image->images[i]) { free(exr_image->images[i]); } } if (exr_image->images) { free(exr_image->images); } if (exr_image->tiles) { for (int tid = 0; tid < exr_image->num_tiles; tid++) { for (int i = 0; i < exr_image->num_channels; i++) { if (exr_image->tiles[tid].images && exr_image->tiles[tid].images[i]) { free(exr_image->tiles[tid].images[i]); } } if (exr_image->tiles[tid].images) { free(exr_image->tiles[tid].images); } } free(exr_image->tiles); } return TINYEXR_SUCCESS; } int ParseEXRHeaderFromFile(EXRHeader *exr_header, const EXRVersion *exr_version, const char *filename, const char **err) { if (exr_header == NULL || exr_version == NULL || filename == NULL) { tinyexr::SetErrorMessage("Invalid argument for ParseEXRHeaderFromFile", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } #ifdef _WIN32 FILE *fp = NULL; fopen_s(&fp, filename, "rb"); #else FILE *fp = fopen(filename, "rb"); #endif if (!fp) { tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err); return TINYEXR_ERROR_CANT_OPEN_FILE; } size_t filesize; // Compute size fseek(fp, 0, SEEK_END); filesize = static_cast<size_t>(ftell(fp)); fseek(fp, 0, SEEK_SET); std::vector<unsigned char> buf(filesize); // @todo { use mmap } { size_t ret; ret = fread(&buf[0], 1, filesize, fp); assert(ret == filesize); fclose(fp); if (ret != filesize) { tinyexr::SetErrorMessage("fread() error on " + std::string(filename), err); return TINYEXR_ERROR_INVALID_FILE; } } return ParseEXRHeaderFromMemory(exr_header, exr_version, &buf.at(0), filesize, err); } int ParseEXRMultipartHeaderFromMemory(EXRHeader ***exr_headers, int *num_headers, const EXRVersion *exr_version, const unsigned char *memory, size_t size, const char **err) { if (memory == NULL || exr_headers == NULL || num_headers == NULL || exr_version == NULL) { // Invalid argument tinyexr::SetErrorMessage( "Invalid argument for ParseEXRMultipartHeaderFromMemory", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } if (size < tinyexr::kEXRVersionSize) { tinyexr::SetErrorMessage("Data size too short", err); return TINYEXR_ERROR_INVALID_DATA; } const unsigned char *marker = memory + tinyexr::kEXRVersionSize; size_t marker_size = size - tinyexr::kEXRVersionSize; std::vector<tinyexr::HeaderInfo> infos; for (;;) { tinyexr::HeaderInfo info; info.clear(); std::string err_str; bool empty_header = false; int ret = ParseEXRHeader(&info, &empty_header, exr_version, &err_str, marker, marker_size); if (ret != TINYEXR_SUCCESS) { tinyexr::SetErrorMessage(err_str, err); return ret; } if (empty_header) { marker += 1; // skip '\0' break; } // `chunkCount` must exist in the header. 
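    // For multipart files the EXR 2.0 spec makes `chunkCount` a required
    // attribute of every part header, so a missing/zero value is an error.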
if (info.chunk_count == 0) { tinyexr::SetErrorMessage( "`chunkCount' attribute is not found in the header.", err); return TINYEXR_ERROR_INVALID_DATA; } infos.push_back(info); // move to next header. marker += info.header_len; size -= info.header_len; } // allocate memory for EXRHeader and create array of EXRHeader pointers. (*exr_headers) = static_cast<EXRHeader **>(malloc(sizeof(EXRHeader *) * infos.size())); for (size_t i = 0; i < infos.size(); i++) { EXRHeader *exr_header = static_cast<EXRHeader *>(malloc(sizeof(EXRHeader))); ConvertHeader(exr_header, infos[i]); // transfoer `tiled` from version. exr_header->tiled = exr_version->tiled; (*exr_headers)[i] = exr_header; } (*num_headers) = static_cast<int>(infos.size()); return TINYEXR_SUCCESS; } int ParseEXRMultipartHeaderFromFile(EXRHeader ***exr_headers, int *num_headers, const EXRVersion *exr_version, const char *filename, const char **err) { if (exr_headers == NULL || num_headers == NULL || exr_version == NULL || filename == NULL) { tinyexr::SetErrorMessage( "Invalid argument for ParseEXRMultipartHeaderFromFile()", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } #ifdef _WIN32 FILE *fp = NULL; fopen_s(&fp, filename, "rb"); #else FILE *fp = fopen(filename, "rb"); #endif if (!fp) { tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err); return TINYEXR_ERROR_CANT_OPEN_FILE; } size_t filesize; // Compute size fseek(fp, 0, SEEK_END); filesize = static_cast<size_t>(ftell(fp)); fseek(fp, 0, SEEK_SET); std::vector<unsigned char> buf(filesize); // @todo { use mmap } { size_t ret; ret = fread(&buf[0], 1, filesize, fp); assert(ret == filesize); fclose(fp); if (ret != filesize) { tinyexr::SetErrorMessage("`fread' error. file may be corrupted.", err); return TINYEXR_ERROR_INVALID_FILE; } } return ParseEXRMultipartHeaderFromMemory( exr_headers, num_headers, exr_version, &buf.at(0), filesize, err); } int ParseEXRVersionFromMemory(EXRVersion *version, const unsigned char *memory, size_t size) { if (version == NULL || memory == NULL) { return TINYEXR_ERROR_INVALID_ARGUMENT; } if (size < tinyexr::kEXRVersionSize) { return TINYEXR_ERROR_INVALID_DATA; } const unsigned char *marker = memory; // Header check. { const char header[] = {0x76, 0x2f, 0x31, 0x01}; if (memcmp(marker, header, 4) != 0) { return TINYEXR_ERROR_INVALID_MAGIC_NUMBER; } marker += 4; } version->tiled = false; version->long_name = false; version->non_image = false; version->multipart = false; // Parse version header. 
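  // Version word layout (little-endian): byte 0 holds the version number
  // (must be 2); byte 1 carries the feature flags tested below:
  //   0x02 tiled (bit 9), 0x04 long attribute names (bit 10),
  //   0x08 non-image/deep data (bit 11), 0x10 multipart (bit 12).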
  {  // must be 2
    if (marker[0] != 2) {
      return TINYEXR_ERROR_INVALID_EXR_VERSION;
    }

    if (version == NULL) {
      return TINYEXR_SUCCESS;  // caller requested validation only
    }

    version->version = 2;

    if (marker[1] & 0x2) {  // 9th bit
      version->tiled = true;
    }
    if (marker[1] & 0x4) {  // 10th bit
      version->long_name = true;
    }
    if (marker[1] & 0x8) {  // 11th bit
      version->non_image = true;  // (deep image)
    }
    if (marker[1] & 0x10) {  // 12th bit
      version->multipart = true;
    }
  }

  return TINYEXR_SUCCESS;
}

int ParseEXRVersionFromFile(EXRVersion *version, const char *filename) {
  if (filename == NULL) {
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

#ifdef _WIN32
  FILE *fp = NULL;
  fopen_s(&fp, filename, "rb");
#else
  FILE *fp = fopen(filename, "rb");
#endif
  if (!fp) {
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }

  size_t file_size;
  // Compute size
  fseek(fp, 0, SEEK_END);
  file_size = static_cast<size_t>(ftell(fp));
  fseek(fp, 0, SEEK_SET);

  if (file_size < tinyexr::kEXRVersionSize) {
    fclose(fp);  // do not leak the handle on the short-file path
    return TINYEXR_ERROR_INVALID_FILE;
  }

  unsigned char buf[tinyexr::kEXRVersionSize];
  size_t ret = fread(&buf[0], 1, tinyexr::kEXRVersionSize, fp);
  fclose(fp);

  if (ret != tinyexr::kEXRVersionSize) {
    return TINYEXR_ERROR_INVALID_FILE;
  }

  return ParseEXRVersionFromMemory(version, buf, tinyexr::kEXRVersionSize);
}

int LoadEXRMultipartImageFromMemory(EXRImage *exr_images,
                                    const EXRHeader **exr_headers,
                                    unsigned int num_parts,
                                    const unsigned char *memory,
                                    const size_t size, const char **err) {
  if (exr_images == NULL || exr_headers == NULL || num_parts == 0 ||
      memory == NULL || (size <= tinyexr::kEXRVersionSize)) {
    tinyexr::SetErrorMessage(
        "Invalid argument for LoadEXRMultipartImageFromMemory()", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  // compute total header size.
  size_t total_header_size = 0;
  for (unsigned int i = 0; i < num_parts; i++) {
    if (exr_headers[i]->header_len == 0) {
      tinyexr::SetErrorMessage("EXRHeader variable is not initialized.", err);
      return TINYEXR_ERROR_INVALID_ARGUMENT;
    }

    total_header_size += exr_headers[i]->header_len;
  }

  const char *marker = reinterpret_cast<const char *>(
      memory + total_header_size + 4 +
      4);  // +8 for magic number and version header.

  marker += 1;  // Skip empty header.

  // NOTE 1:
  //   In multipart image, There is 'part number' before chunk data.
  //   4 byte : part number
  //   4+     : chunk
  //
  // NOTE 2:
  //   EXR spec says 'part number' is 'unsigned long' but actually this is
  //   'unsigned int(4 bytes)' in OpenEXR implementation...
  //   http://www.openexr.com/openexrfilelayout.pdf

  // Load chunk offset table.
  std::vector<std::vector<tinyexr::tinyexr_uint64> > chunk_offset_table_list;
  for (size_t i = 0; i < static_cast<size_t>(num_parts); i++) {
    std::vector<tinyexr::tinyexr_uint64> offset_table(
        static_cast<size_t>(exr_headers[i]->chunk_count));

    for (size_t c = 0; c < offset_table.size(); c++) {
      tinyexr::tinyexr_uint64 offset;
      memcpy(&offset, marker, 8);
      tinyexr::swap8(&offset);

      if (offset >= size) {
        tinyexr::SetErrorMessage("Invalid offset size in EXR header chunks.",
                                 err);
        return TINYEXR_ERROR_INVALID_DATA;
      }

      offset_table[c] = offset + 4;  // +4 to skip 'part number'
      marker += 8;
    }

    chunk_offset_table_list.push_back(offset_table);
  }

  // Decode image.
  for (size_t i = 0; i < static_cast<size_t>(num_parts); i++) {
    std::vector<tinyexr::tinyexr_uint64> &offset_table =
        chunk_offset_table_list[i];

    // First check 'part number' is identical to 'i'
    for (size_t c = 0; c < offset_table.size(); c++) {
      const unsigned char *part_number_addr =
          memory + offset_table[c] - 4;  // -4 to move to 'part number' field.
      unsigned int part_no;
      memcpy(&part_no, part_number_addr, sizeof(unsigned int));  // 4
      tinyexr::swap4(&part_no);

      if (part_no != i) {
        tinyexr::SetErrorMessage("Invalid `part number' in EXR header chunks.",
                                 err);
        return TINYEXR_ERROR_INVALID_DATA;
      }
    }

    std::string e;
    int ret = tinyexr::DecodeChunk(&exr_images[i], exr_headers[i],
                                   offset_table, memory, size, &e);
    if (ret != TINYEXR_SUCCESS) {
      if (!e.empty()) {
        tinyexr::SetErrorMessage(e, err);
      }
      return ret;
    }
  }

  return TINYEXR_SUCCESS;
}

int LoadEXRMultipartImageFromFile(EXRImage *exr_images,
                                  const EXRHeader **exr_headers,
                                  unsigned int num_parts, const char *filename,
                                  const char **err) {
  if (exr_images == NULL || exr_headers == NULL || num_parts == 0) {
    tinyexr::SetErrorMessage(
        "Invalid argument for LoadEXRMultipartImageFromFile", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

#ifdef _WIN32
  FILE *fp = NULL;
  fopen_s(&fp, filename, "rb");
#else
  FILE *fp = fopen(filename, "rb");
#endif
  if (!fp) {
    tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }

  size_t filesize;
  // Compute size
  fseek(fp, 0, SEEK_END);
  filesize = static_cast<size_t>(ftell(fp));
  fseek(fp, 0, SEEK_SET);

  std::vector<unsigned char> buf(filesize);  // @todo { use mmap }
  {
    size_t ret;
    ret = fread(&buf[0], 1, filesize, fp);
    assert(ret == filesize);
    fclose(fp);
    (void)ret;
  }

  return LoadEXRMultipartImageFromMemory(exr_images, exr_headers, num_parts,
                                         &buf.at(0), filesize, err);
}

int SaveEXR(const float *data, int width, int height, int components,
            const int save_as_fp16, const char *outfilename,
            const char **err) {
  if ((components == 1) || (components == 3) || (components == 4)) {
    // OK
  } else {
    std::stringstream ss;
    ss << "Unsupported component value : " << components << std::endl;

    tinyexr::SetErrorMessage(ss.str(), err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  EXRHeader header;
  InitEXRHeader(&header);

  if ((width < 16) && (height < 16)) {
    // No compression for small image.
    header.compression_type = TINYEXR_COMPRESSIONTYPE_NONE;
  } else {
    header.compression_type = TINYEXR_COMPRESSIONTYPE_ZIP;
  }

  EXRImage image;
  InitEXRImage(&image);

  image.num_channels = components;

  std::vector<float> images[4];

  if (components == 1) {
    images[0].resize(static_cast<size_t>(width * height));
    memcpy(images[0].data(), data, sizeof(float) * size_t(width * height));
  } else {
    images[0].resize(static_cast<size_t>(width * height));
    images[1].resize(static_cast<size_t>(width * height));
    images[2].resize(static_cast<size_t>(width * height));
    images[3].resize(static_cast<size_t>(width * height));

    // Split RGB(A)RGB(A)RGB(A)... into R, G and B (and A) layers
    for (size_t i = 0; i < static_cast<size_t>(width * height); i++) {
      images[0][i] = data[static_cast<size_t>(components) * i + 0];
      images[1][i] = data[static_cast<size_t>(components) * i + 1];
      images[2][i] = data[static_cast<size_t>(components) * i + 2];
      if (components == 4) {
        images[3][i] = data[static_cast<size_t>(components) * i + 3];
      }
    }
  }

  float *image_ptr[4] = {0, 0, 0, 0};
  if (components == 4) {
    image_ptr[0] = &(images[3].at(0));  // A
    image_ptr[1] = &(images[2].at(0));  // B
    image_ptr[2] = &(images[1].at(0));  // G
    image_ptr[3] = &(images[0].at(0));  // R
  } else if (components == 3) {
    image_ptr[0] = &(images[2].at(0));  // B
    image_ptr[1] = &(images[1].at(0));  // G
    image_ptr[2] = &(images[0].at(0));  // R
  } else if (components == 1) {
    image_ptr[0] = &(images[0].at(0));  // A
  }

  image.images = reinterpret_cast<unsigned char **>(image_ptr);
  image.width = width;
  image.height = height;

  header.num_channels = components;
  header.channels = static_cast<EXRChannelInfo *>(malloc(
      sizeof(EXRChannelInfo) * static_cast<size_t>(header.num_channels)));
  // Must be (A)BGR order, since most EXR viewers expect this channel order.
  if (components == 4) {
#ifdef _MSC_VER
    strncpy_s(header.channels[0].name, "A", 255);
    strncpy_s(header.channels[1].name, "B", 255);
    strncpy_s(header.channels[2].name, "G", 255);
    strncpy_s(header.channels[3].name, "R", 255);
#else
    strncpy(header.channels[0].name, "A", 255);
    strncpy(header.channels[1].name, "B", 255);
    strncpy(header.channels[2].name, "G", 255);
    strncpy(header.channels[3].name, "R", 255);
#endif
    header.channels[0].name[strlen("A")] = '\0';
    header.channels[1].name[strlen("B")] = '\0';
    header.channels[2].name[strlen("G")] = '\0';
    header.channels[3].name[strlen("R")] = '\0';
  } else if (components == 3) {
#ifdef _MSC_VER
    strncpy_s(header.channels[0].name, "B", 255);
    strncpy_s(header.channels[1].name, "G", 255);
    strncpy_s(header.channels[2].name, "R", 255);
#else
    strncpy(header.channels[0].name, "B", 255);
    strncpy(header.channels[1].name, "G", 255);
    strncpy(header.channels[2].name, "R", 255);
#endif
    header.channels[0].name[strlen("B")] = '\0';
    header.channels[1].name[strlen("G")] = '\0';
    header.channels[2].name[strlen("R")] = '\0';
  } else {
#ifdef _MSC_VER
    strncpy_s(header.channels[0].name, "A", 255);
#else
    strncpy(header.channels[0].name, "A", 255);
#endif
    header.channels[0].name[strlen("A")] = '\0';
  }

  header.pixel_types = static_cast<int *>(
      malloc(sizeof(int) * static_cast<size_t>(header.num_channels)));
  header.requested_pixel_types = static_cast<int *>(
      malloc(sizeof(int) * static_cast<size_t>(header.num_channels)));
  for (int i = 0; i < header.num_channels; i++) {
    header.pixel_types[i] =
        TINYEXR_PIXELTYPE_FLOAT;  // pixel type of input image

    if (save_as_fp16 > 0) {
      header.requested_pixel_types[i] =
          TINYEXR_PIXELTYPE_HALF;  // save with half(fp16) pixel format
    } else {
      header.requested_pixel_types[i] =
          TINYEXR_PIXELTYPE_FLOAT;  // save with float(fp32) pixel format
                                    // (i.e. no precision reduction)
    }
  }

  int ret = SaveEXRImageToFile(&image, &header, outfilename, err);

  // Free header resources on both the success and failure paths.
  free(header.channels);
  free(header.pixel_types);
  free(header.requested_pixel_types);

  return ret;
}

#ifdef __clang__
// zero-as-null-pointer-constant
#pragma clang diagnostic pop
#endif

#endif  // TINYEXR_IMPLEMENTATION_DEFINED
#endif  // TINYEXR_IMPLEMENTATION
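/*
 * Usage sketch (editorial addition, not part of tinyexr): exercises the
 * SaveEXR() and ParseEXRVersionFromFile() entry points defined above.
 * The file name "out.exr" and the pixel values are illustrative only,
 * so the block is fenced off with #if 0.
 */
#if 0
static int TinyEXRExample(void) {
  const int w = 8, h = 8, comps = 3;
  float rgb[8 * 8 * 3];
  for (int i = 0; i < w * h * comps; i++) {
    rgb[i] = (float)i / (float)(w * h * comps);  // arbitrary gradient
  }

  const char *err = NULL;
  int ret = SaveEXR(rgb, w, h, comps, /* save_as_fp16 */ 1, "out.exr", &err);
  if (ret != TINYEXR_SUCCESS) {
    fprintf(stderr, "SaveEXR failed: %s\n", err ? err : "(unknown)");
    return ret;
  }

  EXRVersion version;
  ret = ParseEXRVersionFromFile(&version, "out.exr");
  if (ret == TINYEXR_SUCCESS) {
    printf("EXR version %d, tiled=%d, multipart=%d\n", version.version,
           (int)version.tiled, (int)version.multipart);
  }
  return ret;
}
#endif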
gesummv.c
/**
 * gesummv.c: This file was adapted from PolyBench/GPU 1.0 test
 * suite to run on GPU with OpenMP 4.0 pragmas and OpenCL driver.
 *
 * http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
 *
 * Contacts: Marcio M Pereira <mpereira@ic.unicamp.br>
 *           Rafael Cardoso F Sousa <rafael.cardoso@students.ic.unicamp.br>
 *           Luís Felipe Mattos <ra107822@students.ic.unicamp.br>
 */

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include <time.h>
#include <unistd.h>

#ifdef _OPENMP
#include <omp.h>
#endif

#include "BenchmarksUtil.h"

// define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 0.05

/* Problem size. */
#ifdef RUN_TEST
#define SIZE 1100
#elif defined(RUN_BENCHMARK)
#define SIZE 9600
#else
#define SIZE 1000
#endif
#define N SIZE

/* Declared constant values for ALPHA and BETA (same as values in
   PolyBench 2.0) */
#define ALPHA 43532.0f
#define BETA 12313.0f

/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;

/* Sequential reference: y = ALPHA * A * x + BETA * B * x */
void gesummv(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *x, DATA_TYPE *y,
             DATA_TYPE *tmp) {
  int i, j;

  for (i = 0; i < N; i++) {
    tmp[i] = 0;
    y[i] = 0;
    for (j = 0; j < N; j++) {
      tmp[i] = A[i * N + j] * x[j] + tmp[i];
      y[i] = B[i * N + j] * x[j] + y[i];
    }
    y[i] = ALPHA * tmp[i] + BETA * y[i];
  }
}

/* Offloaded variant: one iteration of the outer loop per output row. */
void gesummv_OMP(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *x, DATA_TYPE *y,
                 DATA_TYPE *tmp) {
#pragma omp target map(to : A[ : N * N], B[ : N * N], x[ : N], tmp[ : N])     \
    map(tofrom : y[ : N]) device(DEVICE_ID)
#pragma omp teams distribute parallel for
  for (int i = 0; i < N; i++) {
    tmp[i] = 0;
    y[i] = 0;
    for (int j = 0; j < N; j++) {
      tmp[i] = A[i * N + j] * x[j] + tmp[i];
      y[i] = B[i * N + j] * x[j] + y[i];
    }
    y[i] = ALPHA * tmp[i] + BETA * y[i];
  }
}

/* Fill both input matrices and the input vector with deterministic
   values so the CPU/GPU comparison does not depend on indeterminate
   memory. */
void init(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *x) {
  int i, j;

  for (i = 0; i < N; i++) {
    x[i] = ((DATA_TYPE)i) / N;
    for (j = 0; j < N; j++) {
      A[i * N + j] = ((DATA_TYPE)i * j) / N;
      B[i * N + j] = ((DATA_TYPE)i * j) / N;
    }
  }
}

int compareResults(DATA_TYPE *y, DATA_TYPE *y_outputFromGpu) {
  int i, fail;
  fail = 0;

  for (i = 0; i < (N); i++) {
    if (percentDiff(y[i], y_outputFromGpu[i]) > PERCENT_DIFF_ERROR_THRESHOLD) {
      fail++;
    }
  }

  // Print results
  printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f "
         "Percent: %d\n",
         PERCENT_DIFF_ERROR_THRESHOLD, fail);

  return fail;
}

int main(int argc, char *argv[]) {
  double t_start, t_end;
  int fail = 0;

  DATA_TYPE *A;
  DATA_TYPE *B;
  DATA_TYPE *x;
  DATA_TYPE *y;
  DATA_TYPE *y_outputFromGpu;
  DATA_TYPE *tmp;

  A = (DATA_TYPE *)malloc(N * N * sizeof(DATA_TYPE));
  B = (DATA_TYPE *)malloc(N * N * sizeof(DATA_TYPE));
  x = (DATA_TYPE *)malloc(N * sizeof(DATA_TYPE));
  y = (DATA_TYPE *)malloc(N * sizeof(DATA_TYPE));
  y_outputFromGpu = (DATA_TYPE *)calloc(N, sizeof(DATA_TYPE));
  tmp = (DATA_TYPE *)malloc(N * sizeof(DATA_TYPE));

  fprintf(stdout, "<< Scalar, Vector and Matrix Multiplication >>\n");

  init(A, B, x);

  t_start = rtclock();
  gesummv_OMP(A, B, x, y_outputFromGpu, tmp);
  t_end = rtclock();
  fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start);

#ifdef RUN_TEST
  t_start = rtclock();
  gesummv(A, B, x, y, tmp);
  t_end = rtclock();
  fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start);

  fail = compareResults(y, y_outputFromGpu);
#endif

  free(A);
  free(B);
  free(x);
  free(y);
  free(y_outputFromGpu);
  free(tmp);

  return fail;
}
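/*
 * Editorial sketch (not part of the original benchmark): gesummv computes
 * y = ALPHA * (A * x) + BETA * (B * x). The 2x2 instance below is worked
 * by hand with illustrative values, so the block is fenced off with #if 0.
 */
#if 0
static void gesummv_tiny_check(void) {
  /* A = [1 2; 3 4], B = [5 6; 7 8], x = [1; 1]
     => A*x = [3; 7] and B*x = [11; 15]
     => y[0] = ALPHA*3 + BETA*11 and y[1] = ALPHA*7 + BETA*15 */
  DATA_TYPE A[4] = {1, 2, 3, 4}, B[4] = {5, 6, 7, 8}, x[2] = {1, 1};
  DATA_TYPE y[2], tmp[2];

  for (int i = 0; i < 2; i++) {
    tmp[i] = 0;
    y[i] = 0;
    for (int j = 0; j < 2; j++) {
      tmp[i] += A[i * 2 + j] * x[j];
      y[i] += B[i * 2 + j] * x[j];
    }
    y[i] = ALPHA * tmp[i] + BETA * y[i];
  }
}
#endif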
transform.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % TTTTT RRRR AAA N N SSSSS FFFFF OOO RRRR M M % % T R R A A NN N SS F O O R R MM MM % % T RRRR AAAAA N N N SSS FFF O O RRRR M M M % % T R R A A N NN SS F O O R R M M % % T R R A A N N SSSSS F OOO R R M M % % % % % % MagickCore Image Transform Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright @ 1999 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/attribute.h" #include "MagickCore/artifact.h" #include "MagickCore/cache.h" #include "MagickCore/cache-view.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/distort.h" #include "MagickCore/draw.h" #include "MagickCore/effect.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/geometry.h" #include "MagickCore/image.h" #include "MagickCore/memory_.h" #include "MagickCore/layer.h" #include "MagickCore/list.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/profile-private.h" #include "MagickCore/property.h" #include "MagickCore/resource_.h" #include "MagickCore/resize.h" #include "MagickCore/statistic.h" #include "MagickCore/string_.h" #include "MagickCore/thread-private.h" #include "MagickCore/transform.h" #include "MagickCore/transform-private.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A u t o O r i e n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AutoOrientImage() adjusts an image so that its orientation is suitable for % viewing (i.e. top-left orientation). % % The format of the AutoOrientImage method is: % % Image *AutoOrientImage(const Image *image, % const OrientationType orientation,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: The image. % % o orientation: Current image orientation. % % o exception: Return any errors or warnings in this structure. 
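%
% For example, an image tagged BottomRightOrientation is rotated 180
% degrees and the result is marked TopLeftOrientation.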
% */ MagickExport Image *AutoOrientImage(const Image *image, const OrientationType orientation,ExceptionInfo *exception) { Image *orient_image; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); orient_image=(Image *) NULL; switch(orientation) { case UndefinedOrientation: case TopLeftOrientation: default: { orient_image=CloneImage(image,0,0,MagickTrue,exception); break; } case TopRightOrientation: { orient_image=FlopImage(image,exception); break; } case BottomRightOrientation: { orient_image=RotateImage(image,180.0,exception); break; } case BottomLeftOrientation: { orient_image=FlipImage(image,exception); break; } case LeftTopOrientation: { orient_image=TransposeImage(image,exception); break; } case RightTopOrientation: { orient_image=RotateImage(image,270.0,exception); break; } case RightBottomOrientation: { orient_image=TransverseImage(image,exception); break; } case LeftBottomOrientation: { orient_image=RotateImage(image,90.0,exception); break; } } if (orient_image != (Image *) NULL) orient_image->orientation=TopLeftOrientation; return(orient_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C h o p I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ChopImage() removes a region of an image and collapses the image to occupy % the removed portion. % % The format of the ChopImage method is: % % Image *ChopImage(const Image *image,const RectangleInfo *chop_info) % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o chop_info: Define the region of the image to chop. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *ChopImage(const Image *image,const RectangleInfo *chop_info, ExceptionInfo *exception) { #define ChopImageTag "Chop/Image" CacheView *chop_view, *image_view; Image *chop_image; MagickBooleanType status; MagickOffsetType progress; RectangleInfo extent; ssize_t y; /* Check chop geometry. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); assert(chop_info != (RectangleInfo *) NULL); if (IsEventLogging() != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (((chop_info->x+(ssize_t) chop_info->width) < 0) || ((chop_info->y+(ssize_t) chop_info->height) < 0) || (chop_info->x > (ssize_t) image->columns) || (chop_info->y > (ssize_t) image->rows)) ThrowImageException(OptionWarning,"GeometryDoesNotContainImage"); extent=(*chop_info); if ((extent.x+(ssize_t) extent.width) > (ssize_t) image->columns) extent.width=(size_t) ((ssize_t) image->columns-extent.x); if ((extent.y+(ssize_t) extent.height) > (ssize_t) image->rows) extent.height=(size_t) ((ssize_t) image->rows-extent.y); if (extent.x < 0) { extent.width-=(size_t) (-extent.x); extent.x=0; } if (extent.y < 0) { extent.height-=(size_t) (-extent.y); extent.y=0; } chop_image=CloneImage(image,image->columns-extent.width,image->rows- extent.height,MagickTrue,exception); if (chop_image == (Image *) NULL) return((Image *) NULL); /* Extract chop image. 
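This first pass copies the rows above the chop rectangle, skipping any
columns that fall inside it; a second pass below copies the remaining
rows beneath the rectangle.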
*/ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); chop_view=AcquireAuthenticCacheView(chop_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,chop_image,extent.y,1) #endif for (y=0; y < (ssize_t) extent.y; y++) { const Quantum *magick_restrict p; ssize_t x; Quantum *magick_restrict q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=QueueCacheViewAuthenticPixels(chop_view,0,y,chop_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { if ((x < extent.x) || (x >= (ssize_t) (extent.x+extent.width))) { ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait chop_traits=GetPixelChannelTraits(chop_image,channel); if ((traits == UndefinedPixelTrait) || (chop_traits == UndefinedPixelTrait)) continue; SetPixelChannel(chop_image,channel,p[i],q); } q+=GetPixelChannels(chop_image); } p+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(chop_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ChopImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } /* Extract chop image. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,chop_image,image->rows-(extent.y+extent.height),1) #endif for (y=0; y < (ssize_t) (image->rows-(extent.y+extent.height)); y++) { const Quantum *magick_restrict p; ssize_t x; Quantum *magick_restrict q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,extent.y+extent.height+y, image->columns,1,exception); q=QueueCacheViewAuthenticPixels(chop_view,0,extent.y+y,chop_image->columns, 1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { if ((x < extent.x) || (x >= (ssize_t) (extent.x+extent.width))) { ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait chop_traits=GetPixelChannelTraits(chop_image,channel); if ((traits == UndefinedPixelTrait) || (chop_traits == UndefinedPixelTrait)) continue; SetPixelChannel(chop_image,channel,p[i],q); } q+=GetPixelChannels(chop_image); } p+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(chop_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ChopImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } chop_view=DestroyCacheView(chop_view); image_view=DestroyCacheView(image_view); chop_image->type=image->type; if (status == MagickFalse) chop_image=DestroyImage(chop_image); return(chop_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C o n s o 
l i d a t e C M Y K I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ConsolidateCMYKImage() consolidates separate C, M, Y, and K planes into a % single image. % % The format of the ConsolidateCMYKImage method is: % % Image *ConsolidateCMYKImage(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image sequence. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *ConsolidateCMYKImages(const Image *images, ExceptionInfo *exception) { CacheView *cmyk_view, *image_view; Image *cmyk_image, *cmyk_images; ssize_t j; ssize_t y; /* Consolidate separate C, M, Y, and K planes into a single image. */ assert(images != (Image *) NULL); assert(images->signature == MagickCoreSignature); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if (IsEventLogging() != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename); cmyk_images=NewImageList(); for (j=0; j < (ssize_t) GetImageListLength(images); j+=4) { ssize_t i; assert(images != (Image *) NULL); cmyk_image=CloneImage(images,0,0,MagickTrue, exception); if (cmyk_image == (Image *) NULL) break; if (SetImageStorageClass(cmyk_image,DirectClass,exception) == MagickFalse) break; (void) SetImageColorspace(cmyk_image,CMYKColorspace,exception); for (i=0; i < 4; i++) { image_view=AcquireVirtualCacheView(images,exception); cmyk_view=AcquireAuthenticCacheView(cmyk_image,exception); for (y=0; y < (ssize_t) images->rows; y++) { const Quantum *magick_restrict p; ssize_t x; Quantum *magick_restrict q; p=GetCacheViewVirtualPixels(image_view,0,y,images->columns,1,exception); q=QueueCacheViewAuthenticPixels(cmyk_view,0,y,cmyk_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) break; for (x=0; x < (ssize_t) images->columns; x++) { Quantum pixel; pixel=ClampToQuantum(QuantumRange-GetPixelIntensity(images,p)); switch (i) { case 0: SetPixelCyan(cmyk_image,pixel,q); break; case 1: SetPixelMagenta(cmyk_image,pixel,q); break; case 2: SetPixelYellow(cmyk_image,pixel,q); break; case 3: SetPixelBlack(cmyk_image,pixel,q); break; default: break; } p+=GetPixelChannels(images); q+=GetPixelChannels(cmyk_image); } if (SyncCacheViewAuthenticPixels(cmyk_view,exception) == MagickFalse) break; } cmyk_view=DestroyCacheView(cmyk_view); image_view=DestroyCacheView(image_view); images=GetNextImageInList(images); if (images == (Image *) NULL) break; } AppendImageToList(&cmyk_images,cmyk_image); } return(cmyk_images); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C r o p I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CropImage() extracts a region of the image starting at the offset defined % by geometry. Region must be fully defined, and no special handling of % geometry flags is performed. % % The format of the CropImage method is: % % Image *CropImage(const Image *image,const RectangleInfo *geometry, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o geometry: Define the region of the image to crop with members % x, y, width, and height. % % o exception: return any errors or warnings in this structure. 
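%
% For example, a geometry of width=100, height=100, x=10, y=20 extracts a
% 100x100 region whose top-left corner lies at offset (10,20) on the
% image's virtual canvas.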
% */ MagickExport Image *CropImage(const Image *image,const RectangleInfo *geometry, ExceptionInfo *exception) { #define CropImageTag "Crop/Image" CacheView *crop_view, *image_view; Image *crop_image; MagickBooleanType status; MagickOffsetType progress; OffsetInfo offset; RectangleInfo bounding_box, page; ssize_t y; /* Check crop geometry. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(geometry != (const RectangleInfo *) NULL); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if (IsEventLogging() != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); bounding_box=image->page; if ((bounding_box.width == 0) || (bounding_box.height == 0)) { bounding_box.width=image->columns; bounding_box.height=image->rows; } page=(*geometry); if (page.width == 0) page.width=bounding_box.width; if (page.height == 0) page.height=bounding_box.height; if (((bounding_box.x-page.x) >= (ssize_t) page.width) || ((bounding_box.y-page.y) >= (ssize_t) page.height) || ((page.x-bounding_box.x) > (ssize_t) image->columns) || ((page.y-bounding_box.y) > (ssize_t) image->rows)) { /* Crop is not within virtual canvas, return 1 pixel transparent image. */ (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning, "GeometryDoesNotContainImage","(\"%.20gx%.20g%+.20g%+.20g\") `%s'", (double) geometry->width,(double) geometry->height, (double) geometry->x,(double) geometry->y,image->filename); crop_image=CloneImage(image,1,1,MagickTrue,exception); if (crop_image == (Image *) NULL) return((Image *) NULL); crop_image->background_color.alpha_trait=BlendPixelTrait; crop_image->background_color.alpha=(MagickRealType) TransparentAlpha; (void) SetImageBackgroundColor(crop_image,exception); crop_image->page=bounding_box; crop_image->page.x=(-1); crop_image->page.y=(-1); if (crop_image->dispose == BackgroundDispose) crop_image->dispose=NoneDispose; return(crop_image); } if ((page.x < 0) && (bounding_box.x >= 0)) { page.width+=page.x-bounding_box.x; page.x=0; } else { page.width-=bounding_box.x-page.x; page.x-=bounding_box.x; if (page.x < 0) page.x=0; } if ((page.y < 0) && (bounding_box.y >= 0)) { page.height+=page.y-bounding_box.y; page.y=0; } else { page.height-=bounding_box.y-page.y; page.y-=bounding_box.y; if (page.y < 0) page.y=0; } if ((page.x+(ssize_t) page.width) > (ssize_t) image->columns) page.width=image->columns-page.x; if ((geometry->width != 0) && (page.width > geometry->width)) page.width=geometry->width; if ((page.y+(ssize_t) page.height) > (ssize_t) image->rows) page.height=image->rows-page.y; if ((geometry->height != 0) && (page.height > geometry->height)) page.height=geometry->height; bounding_box.x+=page.x; bounding_box.y+=page.y; if ((page.width == 0) || (page.height == 0)) { (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning, "GeometryDoesNotContainImage","`%s'",image->filename); return((Image *) NULL); } /* Initialize crop image attributes. 
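The crop keeps the source's virtual canvas (page) dimensions unless the
crop would extend beyond them, and records its own origin within that
canvas.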
*/ crop_image=CloneImage(image,page.width,page.height,MagickTrue,exception); if (crop_image == (Image *) NULL) return((Image *) NULL); crop_image->page.width=image->page.width; crop_image->page.height=image->page.height; offset.x=(ssize_t) (bounding_box.x+bounding_box.width); offset.y=(ssize_t) (bounding_box.y+bounding_box.height); if ((offset.x > (ssize_t) image->page.width) || (offset.y > (ssize_t) image->page.height)) { crop_image->page.width=bounding_box.width; crop_image->page.height=bounding_box.height; } crop_image->page.x=bounding_box.x; crop_image->page.y=bounding_box.y; /* Crop image. */ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); crop_view=AcquireAuthenticCacheView(crop_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,crop_image,crop_image->rows,1) #endif for (y=0; y < (ssize_t) crop_image->rows; y++) { const Quantum *magick_restrict p; Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,page.x,page.y+y,crop_image->columns, 1,exception); q=QueueCacheViewAuthenticPixels(crop_view,0,y,crop_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) crop_image->columns; x++) { ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait crop_traits=GetPixelChannelTraits(crop_image,channel); if ((traits == UndefinedPixelTrait) || (crop_traits == UndefinedPixelTrait)) continue; SetPixelChannel(crop_image,channel,p[i],q); } p+=GetPixelChannels(image); q+=GetPixelChannels(crop_image); } if (SyncCacheViewAuthenticPixels(crop_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,CropImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } crop_view=DestroyCacheView(crop_view); image_view=DestroyCacheView(image_view); crop_image->type=image->type; if (status == MagickFalse) crop_image=DestroyImage(crop_image); return(crop_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C r o p I m a g e T o T i l e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CropImageToTiles() crops a single image, into a possible list of tiles. % This may include a single sub-region of the image. This basically applies % all the normal geometry flags for Crop. % % Image *CropImageToTiles(const Image *image, % const RectangleInfo *crop_geometry, ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image The transformed image is returned as this parameter. % % o crop_geometry: A crop geometry string. % % o exception: return any errors or warnings in this structure. % */ static inline ssize_t PixelRoundOffset(double x) { /* Round the fraction to nearest integer. 
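For example, 2.4 rounds to 2 and 2.6 rounds to 3; exact halves such as
2.5 round toward positive infinity.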
*/ if ((x-floor(x)) < (ceil(x)-x)) return(CastDoubleToLong(floor(x))); return(CastDoubleToLong(ceil(x))); } MagickExport Image *CropImageToTiles(const Image *image, const char *crop_geometry,ExceptionInfo *exception) { Image *next, *crop_image; MagickStatusType flags; RectangleInfo geometry; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (IsEventLogging() != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); flags=ParseGravityGeometry(image,crop_geometry,&geometry,exception); if ((flags & AreaValue) != 0) { PointInfo delta, offset; RectangleInfo crop; size_t height, width; /* Crop into NxM tiles (@ flag). */ crop_image=NewImageList(); width=image->columns; height=image->rows; if (geometry.width == 0) geometry.width=1; if (geometry.height == 0) geometry.height=1; if ((flags & AspectValue) == 0) { width-=(geometry.x < 0 ? -1 : 1)*geometry.x; height-=(geometry.y < 0 ? -1 : 1)*geometry.y; } else { width+=(geometry.x < 0 ? -1 : 1)*geometry.x; height+=(geometry.y < 0 ? -1 : 1)*geometry.y; } delta.x=(double) width/geometry.width; delta.y=(double) height/geometry.height; if (delta.x < 1.0) delta.x=1.0; if (delta.y < 1.0) delta.y=1.0; for (offset.y=0; offset.y < (double) height; ) { if ((flags & AspectValue) == 0) { crop.y=PixelRoundOffset((double) (offset.y- (geometry.y > 0 ? 0 : geometry.y))); offset.y+=delta.y; /* increment now to find width */ crop.height=(size_t) PixelRoundOffset((double) (offset.y+ (geometry.y < 0 ? 0 : geometry.y))); } else { crop.y=PixelRoundOffset((double) (offset.y- (geometry.y > 0 ? geometry.y : 0))); offset.y+=delta.y; /* increment now to find width */ crop.height=(size_t) PixelRoundOffset((double) (offset.y+(geometry.y < -1 ? geometry.y : 0))); } crop.height-=crop.y; crop.y+=image->page.y; for (offset.x=0; offset.x < (double) width; ) { if ((flags & AspectValue) == 0) { crop.x=PixelRoundOffset((double) (offset.x- (geometry.x > 0 ? 0 : geometry.x))); offset.x+=delta.x; /* increment now to find height */ crop.width=(size_t) PixelRoundOffset((double) (offset.x+ (geometry.x < 0 ? 0 : geometry.x))); } else { crop.x=PixelRoundOffset((double) (offset.x- (geometry.x > 0 ? geometry.x : 0))); offset.x+=delta.x; /* increment now to find height */ crop.width=(size_t) PixelRoundOffset((double) (offset.x+ (geometry.x < 0 ? geometry.x : 0))); } crop.width-=crop.x; crop.x+=image->page.x; next=CropImage(image,&crop,exception); if (next != (Image *) NULL) AppendImageToList(&crop_image,next); } } ClearMagickException(exception); return(crop_image); } if (((geometry.width == 0) && (geometry.height == 0)) || ((flags & XValue) != 0) || ((flags & YValue) != 0)) { /* Crop a single region at +X+Y. */ crop_image=CropImage(image,&geometry,exception); if ((crop_image != (Image *) NULL) && ((flags & AspectValue) != 0)) { crop_image->page.width=geometry.width; crop_image->page.height=geometry.height; crop_image->page.x-=geometry.x; crop_image->page.y-=geometry.y; } return(crop_image); } if ((image->columns > geometry.width) || (image->rows > geometry.height)) { RectangleInfo page; size_t height, width; ssize_t x, y; /* Crop into tiles of fixed size WxH. 
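Tiles are produced left-to-right, top-to-bottom; tiles at the right and
bottom edges may be smaller than the requested WxH.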
*/ page=image->page; if (page.width == 0) page.width=image->columns; if (page.height == 0) page.height=image->rows; width=geometry.width; if (width == 0) width=page.width; height=geometry.height; if (height == 0) height=page.height; next=(Image *) NULL; crop_image=NewImageList(); for (y=0; y < (ssize_t) page.height; y+=(ssize_t) height) { for (x=0; x < (ssize_t) page.width; x+=(ssize_t) width) { geometry.width=width; geometry.height=height; geometry.x=x; geometry.y=y; next=CropImage(image,&geometry,exception); if (next == (Image *) NULL) break; AppendImageToList(&crop_image,next); } if (next == (Image *) NULL) break; } return(crop_image); } return(CloneImage(image,0,0,MagickTrue,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % E x c e r p t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ExcerptImage() returns a excerpt of the image as defined by the geometry. % % The format of the ExcerptImage method is: % % Image *ExcerptImage(const Image *image,const RectangleInfo *geometry, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o geometry: Define the region of the image to extend with members % x, y, width, and height. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *ExcerptImage(const Image *image, const RectangleInfo *geometry,ExceptionInfo *exception) { #define ExcerptImageTag "Excerpt/Image" CacheView *excerpt_view, *image_view; Image *excerpt_image; MagickBooleanType status; MagickOffsetType progress; ssize_t y; /* Allocate excerpt image. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(geometry != (const RectangleInfo *) NULL); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if (IsEventLogging() != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); excerpt_image=CloneImage(image,geometry->width,geometry->height,MagickTrue, exception); if (excerpt_image == (Image *) NULL) return((Image *) NULL); /* Excerpt each row. 
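Each output row is read directly from the source at the requested
offset; unlike CropImage(), no virtual canvas (page) adjustment is
performed.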
*/ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); excerpt_view=AcquireAuthenticCacheView(excerpt_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,excerpt_image,excerpt_image->rows,1) #endif for (y=0; y < (ssize_t) excerpt_image->rows; y++) { const Quantum *magick_restrict p; Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,geometry->x,geometry->y+y, geometry->width,1,exception); q=GetCacheViewAuthenticPixels(excerpt_view,0,y,excerpt_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) excerpt_image->columns; x++) { ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait excerpt_traits=GetPixelChannelTraits(excerpt_image,channel); if ((traits == UndefinedPixelTrait) || (excerpt_traits == UndefinedPixelTrait)) continue; SetPixelChannel(excerpt_image,channel,p[i],q); } p+=GetPixelChannels(image); q+=GetPixelChannels(excerpt_image); } if (SyncCacheViewAuthenticPixels(excerpt_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ExcerptImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } excerpt_view=DestroyCacheView(excerpt_view); image_view=DestroyCacheView(image_view); excerpt_image->type=image->type; if (status == MagickFalse) excerpt_image=DestroyImage(excerpt_image); return(excerpt_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % E x t e n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ExtentImage() extends the image as defined by the geometry, gravity, and % image background color. Set the (x,y) offset of the geometry to move the % original image relative to the extended image. % % The format of the ExtentImage method is: % % Image *ExtentImage(const Image *image,const RectangleInfo *geometry, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o geometry: Define the region of the image to extend with members % x, y, width, and height. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *ExtentImage(const Image *image, const RectangleInfo *geometry,ExceptionInfo *exception) { Image *extent_image; MagickBooleanType status; /* Allocate extent image. 
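The new canvas is filled with the background color, then the source is
composited at (-x,-y); a positive offset therefore shifts the image
toward the top-left.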
*/ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(geometry != (const RectangleInfo *) NULL); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if (IsEventLogging() != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); extent_image=CloneImage(image,geometry->width,geometry->height,MagickTrue, exception); if (extent_image == (Image *) NULL) return((Image *) NULL); status=SetImageBackgroundColor(extent_image,exception); if (status == MagickFalse) { extent_image=DestroyImage(extent_image); return((Image *) NULL); } status=CompositeImage(extent_image,image,image->compose,MagickTrue, -geometry->x,-geometry->y,exception); if (status != MagickFalse) Update8BIMClipPath(extent_image,image->columns,image->rows,geometry); return(extent_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % F l i p I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % FlipImage() creates a vertical mirror image by reflecting the pixels % around the central x-axis. % % The format of the FlipImage method is: % % Image *FlipImage(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *FlipImage(const Image *image,ExceptionInfo *exception) { #define FlipImageTag "Flip/Image" CacheView *flip_view, *image_view; Image *flip_image; MagickBooleanType status; MagickOffsetType progress; RectangleInfo page; ssize_t y; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if (IsEventLogging() != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); flip_image=CloneImage(image,0,0,MagickTrue,exception); if (flip_image == (Image *) NULL) return((Image *) NULL); /* Flip image. 
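Source row y is written to destination row (rows - y - 1), mirroring
the image about the central x-axis.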
*/ status=MagickTrue; progress=0; page=image->page; image_view=AcquireVirtualCacheView(image,exception); flip_view=AcquireAuthenticCacheView(flip_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,flip_image,flip_image->rows,1) #endif for (y=0; y < (ssize_t) flip_image->rows; y++) { const Quantum *magick_restrict p; Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=QueueCacheViewAuthenticPixels(flip_view,0,(ssize_t) (flip_image->rows-y- 1),flip_image->columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) flip_image->columns; x++) { ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait flip_traits=GetPixelChannelTraits(flip_image,channel); if ((traits == UndefinedPixelTrait) || (flip_traits == UndefinedPixelTrait)) continue; SetPixelChannel(flip_image,channel,p[i],q); } p+=GetPixelChannels(image); q+=GetPixelChannels(flip_image); } if (SyncCacheViewAuthenticPixels(flip_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,FlipImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } flip_view=DestroyCacheView(flip_view); image_view=DestroyCacheView(image_view); flip_image->type=image->type; if (page.height != 0) page.y=(ssize_t) (page.height-flip_image->rows-page.y); flip_image->page=page; if (status == MagickFalse) flip_image=DestroyImage(flip_image); return(flip_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % F l o p I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % FlopImage() creates a horizontal mirror image by reflecting the pixels % around the central y-axis. % % The format of the FlopImage method is: % % Image *FlopImage(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *FlopImage(const Image *image,ExceptionInfo *exception) { #define FlopImageTag "Flop/Image" CacheView *flop_view, *image_view; Image *flop_image; MagickBooleanType status; MagickOffsetType progress; RectangleInfo page; ssize_t y; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if (IsEventLogging() != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); flop_image=CloneImage(image,0,0,MagickTrue,exception); if (flop_image == (Image *) NULL) return((Image *) NULL); /* Flop each row. 
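The destination pointer starts past the right edge of each row and
steps backwards, mirroring each row about the central y-axis.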
*/ status=MagickTrue; progress=0; page=image->page; image_view=AcquireVirtualCacheView(image,exception); flop_view=AcquireAuthenticCacheView(flop_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,flop_image,flop_image->rows,1) #endif for (y=0; y < (ssize_t) flop_image->rows; y++) { const Quantum *magick_restrict p; ssize_t x; Quantum *magick_restrict q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=QueueCacheViewAuthenticPixels(flop_view,0,y,flop_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } q+=GetPixelChannels(flop_image)*flop_image->columns; for (x=0; x < (ssize_t) flop_image->columns; x++) { ssize_t i; q-=GetPixelChannels(flop_image); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait flop_traits=GetPixelChannelTraits(flop_image,channel); if ((traits == UndefinedPixelTrait) || (flop_traits == UndefinedPixelTrait)) continue; SetPixelChannel(flop_image,channel,p[i],q); } p+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(flop_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,FlopImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } flop_view=DestroyCacheView(flop_view); image_view=DestroyCacheView(image_view); flop_image->type=image->type; if (page.width != 0) page.x=(ssize_t) (page.width-flop_image->columns-page.x); flop_image->page=page; if (status == MagickFalse) flop_image=DestroyImage(flop_image); return(flop_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R o l l I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RollImage() offsets an image as defined by x_offset and y_offset. % % The format of the RollImage method is: % % Image *RollImage(const Image *image,const ssize_t x_offset, % const ssize_t y_offset,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o x_offset: the number of columns to roll in the horizontal direction. % % o y_offset: the number of rows to roll in the vertical direction. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType CopyImageRegion(Image *destination,const Image *source, const size_t columns,const size_t rows,const ssize_t sx,const ssize_t sy, const ssize_t dx,const ssize_t dy,ExceptionInfo *exception) { CacheView *source_view, *destination_view; MagickBooleanType status; ssize_t y; if (columns == 0) return(MagickTrue); status=MagickTrue; source_view=AcquireVirtualCacheView(source,exception); destination_view=AcquireAuthenticCacheView(destination,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(source,destination,rows,1) #endif for (y=0; y < (ssize_t) rows; y++) { MagickBooleanType sync; const Quantum *magick_restrict p; Quantum *magick_restrict q; ssize_t x; /* Transfer scanline. 
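Only channels defined in both the source and destination images are
copied; undefined channels are left untouched.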
*/ if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(source_view,sx,sy+y,columns,1,exception); q=GetCacheViewAuthenticPixels(destination_view,dx,dy+y,columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) columns; x++) { ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(source); i++) { PixelChannel channel = GetPixelChannelChannel(source,i); PixelTrait source_traits=GetPixelChannelTraits(source,channel); PixelTrait destination_traits=GetPixelChannelTraits(destination, channel); if ((source_traits == UndefinedPixelTrait) || (destination_traits == UndefinedPixelTrait)) continue; SetPixelChannel(destination,channel,p[i],q); } p+=GetPixelChannels(source); q+=GetPixelChannels(destination); } sync=SyncCacheViewAuthenticPixels(destination_view,exception); if (sync == MagickFalse) status=MagickFalse; } destination_view=DestroyCacheView(destination_view); source_view=DestroyCacheView(source_view); return(status); } MagickExport Image *RollImage(const Image *image,const ssize_t x_offset, const ssize_t y_offset,ExceptionInfo *exception) { #define RollImageTag "Roll/Image" Image *roll_image; MagickStatusType status; RectangleInfo offset; /* Initialize roll image attributes. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if (IsEventLogging() != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); roll_image=CloneImage(image,0,0,MagickTrue,exception); if (roll_image == (Image *) NULL) return((Image *) NULL); offset.x=x_offset; offset.y=y_offset; while (offset.x < 0) offset.x+=(ssize_t) image->columns; while (offset.x >= (ssize_t) image->columns) offset.x-=(ssize_t) image->columns; while (offset.y < 0) offset.y+=(ssize_t) image->rows; while (offset.y >= (ssize_t) image->rows) offset.y-=(ssize_t) image->rows; /* Roll image. */ status=CopyImageRegion(roll_image,image,(size_t) offset.x, (size_t) offset.y,(ssize_t) image->columns-offset.x,(ssize_t) image->rows- offset.y,0,0,exception); (void) SetImageProgress(image,RollImageTag,0,3); status&=CopyImageRegion(roll_image,image,image->columns-offset.x, (size_t) offset.y,0,(ssize_t) image->rows-offset.y,offset.x,0, exception); (void) SetImageProgress(image,RollImageTag,1,3); status&=CopyImageRegion(roll_image,image,(size_t) offset.x,image->rows- offset.y,(ssize_t) image->columns-offset.x,0,0,offset.y,exception); (void) SetImageProgress(image,RollImageTag,2,3); status&=CopyImageRegion(roll_image,image,image->columns-offset.x,image->rows- offset.y,0,0,offset.x,offset.y,exception); (void) SetImageProgress(image,RollImageTag,3,3); roll_image->type=image->type; if (status == MagickFalse) roll_image=DestroyImage(roll_image); return(roll_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S h a v e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ShaveImage() shaves pixels from the image edges. It allocates the memory % necessary for the new Image structure and returns a pointer to the new % image. % % The format of the ShaveImage method is: % % Image *ShaveImage(const Image *image,const RectangleInfo *shave_info, % ExceptionInfo *exception) % % A description of each parameter follows: % % o shave_image: Method ShaveImage returns a pointer to the shaved % image. 
A null image is returned if there is a memory shortage or % if the image width or height is zero. % % o image: the image. % % o shave_info: Specifies a pointer to a RectangleInfo which defines the % region of the image to crop. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *ShaveImage(const Image *image, const RectangleInfo *shave_info,ExceptionInfo *exception) { Image *shave_image; RectangleInfo geometry; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (IsEventLogging() != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (((2*shave_info->width) >= image->columns) || ((2*shave_info->height) >= image->rows)) ThrowImageException(OptionWarning,"GeometryDoesNotContainImage"); SetGeometry(image,&geometry); geometry.width-=2*shave_info->width; geometry.height-=2*shave_info->height; geometry.x=(ssize_t) shave_info->width+image->page.x; geometry.y=(ssize_t) shave_info->height+image->page.y; shave_image=CropImage(image,&geometry,exception); if (shave_image == (Image *) NULL) return((Image *) NULL); shave_image->page.width-=2*shave_info->width; shave_image->page.height-=2*shave_info->height; shave_image->page.x-=(ssize_t) shave_info->width; shave_image->page.y-=(ssize_t) shave_info->height; return(shave_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S p l i c e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SpliceImage() splices a solid color into the image as defined by the % geometry. % % The format of the SpliceImage method is: % % Image *SpliceImage(const Image *image,const RectangleInfo *geometry, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o geometry: Define the region of the image to splice with members % x, y, width, and height. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *SpliceImage(const Image *image, const RectangleInfo *geometry,ExceptionInfo *exception) { #define SpliceImageTag "Splice/Image" CacheView *image_view, *splice_view; Image *splice_image; MagickBooleanType status; MagickOffsetType progress; RectangleInfo splice_geometry; ssize_t columns, y; /* Allocate splice image. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (IsEventLogging() != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(geometry != (const RectangleInfo *) NULL); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); splice_geometry=(*geometry); splice_image=CloneImage(image,image->columns+splice_geometry.width, image->rows+splice_geometry.height,MagickTrue,exception); if (splice_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(splice_image,DirectClass,exception) == MagickFalse) { splice_image=DestroyImage(splice_image); return((Image *) NULL); } if ((IsPixelInfoGray(&splice_image->background_color) == MagickFalse) && (IsGrayColorspace(splice_image->colorspace) != MagickFalse)) (void) SetImageColorspace(splice_image,sRGBColorspace,exception); if ((splice_image->background_color.alpha_trait != UndefinedPixelTrait) && (splice_image->alpha_trait == UndefinedPixelTrait)) (void) SetImageAlpha(splice_image,OpaqueAlpha,exception); (void) SetImageBackgroundColor(splice_image,exception); /* Respect image geometry. 
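Gravity shifts the splice offset by zero, half, or all of the splice
width and height so the new rows and columns land on the requested side
of the image.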
*/ switch (image->gravity) { default: case UndefinedGravity: case NorthWestGravity: break; case NorthGravity: { splice_geometry.x+=(ssize_t) splice_geometry.width/2; break; } case NorthEastGravity: { splice_geometry.x+=(ssize_t) splice_geometry.width; break; } case WestGravity: { splice_geometry.y+=(ssize_t) splice_geometry.width/2; break; } case CenterGravity: { splice_geometry.x+=(ssize_t) splice_geometry.width/2; splice_geometry.y+=(ssize_t) splice_geometry.height/2; break; } case EastGravity: { splice_geometry.x+=(ssize_t) splice_geometry.width; splice_geometry.y+=(ssize_t) splice_geometry.height/2; break; } case SouthWestGravity: { splice_geometry.y+=(ssize_t) splice_geometry.height; break; } case SouthGravity: { splice_geometry.x+=(ssize_t) splice_geometry.width/2; splice_geometry.y+=(ssize_t) splice_geometry.height; break; } case SouthEastGravity: { splice_geometry.x+=(ssize_t) splice_geometry.width; splice_geometry.y+=(ssize_t) splice_geometry.height; break; } } /* Splice image. */ status=MagickTrue; progress=0; columns=MagickMin(splice_geometry.x,(ssize_t) splice_image->columns); image_view=AcquireVirtualCacheView(image,exception); splice_view=AcquireAuthenticCacheView(splice_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,splice_image,splice_geometry.y,1) #endif for (y=0; y < (ssize_t) splice_geometry.y; y++) { const Quantum *magick_restrict p; ssize_t x; Quantum *magick_restrict q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,splice_image->columns,1, exception); q=QueueCacheViewAuthenticPixels(splice_view,0,y,splice_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < columns; x++) { ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait splice_traits=GetPixelChannelTraits(splice_image,channel); if ((traits == UndefinedPixelTrait) || (splice_traits == UndefinedPixelTrait)) continue; SetPixelChannel(splice_image,channel,p[i],q); } SetPixelRed(splice_image,GetPixelRed(image,p),q); SetPixelGreen(splice_image,GetPixelGreen(image,p),q); SetPixelBlue(splice_image,GetPixelBlue(image,p),q); SetPixelAlpha(splice_image,GetPixelAlpha(image,p),q); p+=GetPixelChannels(image); q+=GetPixelChannels(splice_image); } for ( ; x < (ssize_t) (splice_geometry.x+splice_geometry.width); x++) q+=GetPixelChannels(splice_image); for ( ; x < (ssize_t) splice_image->columns; x++) { ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait splice_traits=GetPixelChannelTraits(splice_image,channel); if ((traits == UndefinedPixelTrait) || (splice_traits == UndefinedPixelTrait)) continue; SetPixelChannel(splice_image,channel,p[i],q); } SetPixelRed(splice_image,GetPixelRed(image,p),q); SetPixelGreen(splice_image,GetPixelGreen(image,p),q); SetPixelBlue(splice_image,GetPixelBlue(image,p),q); SetPixelAlpha(splice_image,GetPixelAlpha(image,p),q); p+=GetPixelChannels(image); q+=GetPixelChannels(splice_image); } if (SyncCacheViewAuthenticPixels(splice_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) 
#pragma omp atomic #endif progress++; proceed=SetImageProgress(image,SpliceImageTag,progress, splice_image->rows); if (proceed == MagickFalse) status=MagickFalse; } } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,splice_image,splice_image->rows,2) #endif for (y=(ssize_t) (splice_geometry.y+splice_geometry.height); y < (ssize_t) splice_image->rows; y++) { const Quantum *magick_restrict p; ssize_t x; Quantum *magick_restrict q; if (status == MagickFalse) continue; if ((y < 0) || (y >= (ssize_t)splice_image->rows)) continue; p=GetCacheViewVirtualPixels(image_view,0,y-(ssize_t) splice_geometry.height, splice_image->columns,1,exception); q=QueueCacheViewAuthenticPixels(splice_view,0,y,splice_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < columns; x++) { ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait splice_traits=GetPixelChannelTraits(splice_image,channel); if ((traits == UndefinedPixelTrait) || (splice_traits == UndefinedPixelTrait)) continue; SetPixelChannel(splice_image,channel,p[i],q); } SetPixelRed(splice_image,GetPixelRed(image,p),q); SetPixelGreen(splice_image,GetPixelGreen(image,p),q); SetPixelBlue(splice_image,GetPixelBlue(image,p),q); SetPixelAlpha(splice_image,GetPixelAlpha(image,p),q); p+=GetPixelChannels(image); q+=GetPixelChannels(splice_image); } for ( ; x < (ssize_t) (splice_geometry.x+splice_geometry.width); x++) q+=GetPixelChannels(splice_image); for ( ; x < (ssize_t) splice_image->columns; x++) { ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait splice_traits=GetPixelChannelTraits(splice_image,channel); if ((traits == UndefinedPixelTrait) || (splice_traits == UndefinedPixelTrait)) continue; SetPixelChannel(splice_image,channel,p[i],q); } SetPixelRed(splice_image,GetPixelRed(image,p),q); SetPixelGreen(splice_image,GetPixelGreen(image,p),q); SetPixelBlue(splice_image,GetPixelBlue(image,p),q); SetPixelAlpha(splice_image,GetPixelAlpha(image,p),q); p+=GetPixelChannels(image); q+=GetPixelChannels(splice_image); } if (SyncCacheViewAuthenticPixels(splice_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,SpliceImageTag,progress, splice_image->rows); if (proceed == MagickFalse) status=MagickFalse; } } splice_view=DestroyCacheView(splice_view); image_view=DestroyCacheView(image_view); if (status == MagickFalse) splice_image=DestroyImage(splice_image); return(splice_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T r a n s f o r m I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TransformImage() is a convenience method that behaves like ResizeImage() or % CropImage() but accepts scaling and/or cropping information as a region % geometry specification. If the operation fails, the original image handle % is left as is. % % This should only be used for single images. 
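%
% For example, TransformImage(&image,"100x100+10+10","50x50",exception)
% crops the image to the 100x100 region at offset (10,10), then resizes
% the result to 50x50.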
%
%  This function destroys what it assumes to be a single image list.
%  If the input image is part of a larger list, all other images in that list
%  are simply 'lost', not destroyed.
%
%  Also, if the crop generates a list of images, only the first image is
%  resized.  And if the crop succeeds but the resize fails, you will get a
%  cropped image as well as a 'false' or 'failed' report.
%
%  This function should probably be deprecated in favor of direct calls
%  to CropImageToTiles() or ResizeImage(), as appropriate.
%
%  The format of the TransformImage method is:
%
%      MagickBooleanType TransformImage(Image **image,const char *crop_geometry,
%        const char *image_geometry,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.  The transformed image is returned in this parameter.
%
%    o crop_geometry: a crop geometry string.  This geometry defines a
%      subregion of the image to crop.
%
%    o image_geometry: an image geometry string.  This geometry defines the
%      final size of the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate MagickBooleanType TransformImage(Image **image,
  const char *crop_geometry,const char *image_geometry,
  ExceptionInfo *exception)
{
  Image
    *resize_image,
    *transform_image;

  RectangleInfo
    geometry;

  assert(image != (Image **) NULL);
  assert((*image)->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      (*image)->filename);
  transform_image=(*image);
  if (crop_geometry != (const char *) NULL)
    {
      Image
        *crop_image;

      /*
        Crop image to a user specified size.
      */
      crop_image=CropImageToTiles(*image,crop_geometry,exception);
      if (crop_image == (Image *) NULL)
        transform_image=CloneImage(*image,0,0,MagickTrue,exception);
      else
        {
          transform_image=DestroyImage(transform_image);
          transform_image=GetFirstImageInList(crop_image);
        }
      *image=transform_image;
    }
  if (image_geometry == (const char *) NULL)
    return(MagickTrue);
  /*
    Scale image to a user specified size.
  */
  (void) ParseRegionGeometry(transform_image,image_geometry,&geometry,
    exception);
  if ((transform_image->columns == geometry.width) &&
      (transform_image->rows == geometry.height))
    return(MagickTrue);
  resize_image=ResizeImage(transform_image,geometry.width,geometry.height,
    transform_image->filter,exception);
  if (resize_image == (Image *) NULL)
    return(MagickFalse);
  transform_image=DestroyImage(transform_image);
  transform_image=resize_image;
  *image=transform_image;
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   T r a n s p o s e I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransposeImage() creates a horizontal mirror image by reflecting the pixels
%  around the central y-axis while rotating them by 90 degrees.
%
%  The format of the TransposeImage method is:
%
%      Image *TransposeImage(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
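%
%  A sketch of typical use:
%
%      transpose_image=TransposeImage(image,exception);
%      if (transpose_image == (Image *) NULL)
%        { /* inspect 'exception' for the cause */ }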
% */ MagickExport Image *TransposeImage(const Image *image,ExceptionInfo *exception) { #define TransposeImageTag "Transpose/Image" CacheView *image_view, *transpose_view; Image *transpose_image; MagickBooleanType status; MagickOffsetType progress; RectangleInfo page; ssize_t y; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if (IsEventLogging() != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); transpose_image=CloneImage(image,image->rows,image->columns,MagickTrue, exception); if (transpose_image == (Image *) NULL) return((Image *) NULL); /* Transpose image. */ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); transpose_view=AcquireAuthenticCacheView(transpose_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,transpose_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const Quantum *magick_restrict p; Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-y-1, image->columns,1,exception); q=QueueCacheViewAuthenticPixels(transpose_view,(ssize_t) (image->rows-y-1), 0,1,transpose_image->rows,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait transpose_traits=GetPixelChannelTraits(transpose_image, channel); if ((traits == UndefinedPixelTrait) || (transpose_traits == UndefinedPixelTrait)) continue; SetPixelChannel(transpose_image,channel,p[i],q); } p+=GetPixelChannels(image); q+=GetPixelChannels(transpose_image); } if (SyncCacheViewAuthenticPixels(transpose_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,TransposeImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } transpose_view=DestroyCacheView(transpose_view); image_view=DestroyCacheView(image_view); transpose_image->type=image->type; page=transpose_image->page; Swap(page.width,page.height); Swap(page.x,page.y); transpose_image->page=page; if (status == MagickFalse) transpose_image=DestroyImage(transpose_image); return(transpose_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T r a n s v e r s e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TransverseImage() creates a vertical mirror image by reflecting the pixels % around the central x-axis while rotating them by 270 degrees. % % The format of the TransverseImage method is: % % Image *TransverseImage(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
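%
%  A sketch of typical use; note that, as with TransposeImage(), the page
%  geometry of the result has its width/height and x/y offsets swapped:
%
%      transverse_image=TransverseImage(image,exception);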
% */ MagickExport Image *TransverseImage(const Image *image,ExceptionInfo *exception) { #define TransverseImageTag "Transverse/Image" CacheView *image_view, *transverse_view; Image *transverse_image; MagickBooleanType status; MagickOffsetType progress; RectangleInfo page; ssize_t y; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if (IsEventLogging() != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); transverse_image=CloneImage(image,image->rows,image->columns,MagickTrue, exception); if (transverse_image == (Image *) NULL) return((Image *) NULL); /* Transverse image. */ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); transverse_view=AcquireAuthenticCacheView(transverse_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,transverse_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; const Quantum *magick_restrict p; Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=QueueCacheViewAuthenticPixels(transverse_view,(ssize_t) (image->rows-y-1), 0,1,transverse_image->rows,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } q+=GetPixelChannels(transverse_image)*image->columns; for (x=0; x < (ssize_t) image->columns; x++) { ssize_t i; q-=GetPixelChannels(transverse_image); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait transverse_traits=GetPixelChannelTraits(transverse_image, channel); if ((traits == UndefinedPixelTrait) || (transverse_traits == UndefinedPixelTrait)) continue; SetPixelChannel(transverse_image,channel,p[i],q); } p+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(transverse_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,TransverseImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } transverse_view=DestroyCacheView(transverse_view); image_view=DestroyCacheView(image_view); transverse_image->type=image->type; page=transverse_image->page; Swap(page.width,page.height); Swap(page.x,page.y); if (page.width != 0) page.x=(ssize_t) (page.width-transverse_image->columns-page.x); if (page.height != 0) page.y=(ssize_t) (page.height-transverse_image->rows-page.y); transverse_image->page=page; if (status == MagickFalse) transverse_image=DestroyImage(transverse_image); return(transverse_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T r i m I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TrimImage() trims pixels from the image edges. It allocates the memory % necessary for the new Image structure and returns a pointer to the new % image. 
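%
%  The "trim:minSize" image artifact, when set to an absolute geometry,
%  bounds how small the trimmed image may become; the crop rectangle is
%  grown to that minimum size and positioned according to the image
%  gravity.  A sketch:
%
%      (void) SetImageArtifact(image,"trim:minSize","32x32");
%      trim_image=TrimImage(image,exception);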
% % The format of the TrimImage method is: % % Image *TrimImage(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *TrimImage(const Image *image,ExceptionInfo *exception) { const char *artifact; Image *trim_image; RectangleInfo geometry, page; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (IsEventLogging() != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); geometry=GetImageBoundingBox(image,exception); if ((geometry.width == 0) || (geometry.height == 0)) { Image *crop_image; crop_image=CloneImage(image,1,1,MagickTrue,exception); if (crop_image == (Image *) NULL) return((Image *) NULL); crop_image->background_color.alpha_trait=BlendPixelTrait; crop_image->background_color.alpha=(MagickRealType) TransparentAlpha; (void) SetImageBackgroundColor(crop_image,exception); crop_image->page=image->page; crop_image->page.x=(-1); crop_image->page.y=(-1); return(crop_image); } page=geometry; artifact=GetImageArtifact(image,"trim:minSize"); if (artifact != (const char *) NULL) (void) ParseAbsoluteGeometry(artifact,&page); if ((geometry.width < page.width) && (geometry.height < page.height)) { /* Limit trim to a minimum size. */ switch (image->gravity) { case CenterGravity: { geometry.x-=((ssize_t) page.width-geometry.width)/2; geometry.y-=((ssize_t) page.height-geometry.height)/2; break; } case NorthWestGravity: { geometry.x-=((ssize_t) page.width-geometry.width); geometry.y-=((ssize_t) page.height-geometry.height); break; } case NorthGravity: { geometry.x-=((ssize_t) page.width-geometry.width)/2; geometry.y-=((ssize_t) page.height-geometry.height); break; } case NorthEastGravity: { geometry.y-=((ssize_t) page.height-geometry.height); break; } case EastGravity: { geometry.y-=((ssize_t) page.height-geometry.height)/2; break; } case SouthEastGravity: break; case SouthGravity: { geometry.x-=((ssize_t) page.width-geometry.width)/2; break; } case SouthWestGravity: { geometry.x-=((ssize_t) page.width-geometry.width); break; } case WestGravity: { geometry.x-=((ssize_t) page.width-geometry.width); geometry.y-=((ssize_t) page.height-geometry.height)/2; break; } default: break; } geometry.width=page.width; geometry.height=page.height; } geometry.x+=image->page.x; geometry.y+=image->page.y; trim_image=CropImage(image,&geometry,exception); if (trim_image != (Image *) NULL) Update8BIMClipPath(trim_image,image->columns,image->rows,&geometry); return(trim_image); }
statistic.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % SSSSS TTTTT AAA TTTTT IIIII SSSSS TTTTT IIIII CCCC % % SS T A A T I SS T I C % % SSS T AAAAA T I SSS T I C % % SS T A A T I SS T I C % % SSSSS T A A T IIIII SSSSS T IIIII CCCC % % % % % % MagickCore Image Statistical Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/accelerate-private.h" #include "MagickCore/animate.h" #include "MagickCore/artifact.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache.h" #include "MagickCore/cache-private.h" #include "MagickCore/cache-view.h" #include "MagickCore/client.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/composite-private.h" #include "MagickCore/compress.h" #include "MagickCore/constitute.h" #include "MagickCore/display.h" #include "MagickCore/draw.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/gem.h" #include "MagickCore/gem-private.h" #include "MagickCore/geometry.h" #include "MagickCore/list.h" #include "MagickCore/image-private.h" #include "MagickCore/magic.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/module.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/paint.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/profile.h" #include "MagickCore/property.h" #include "MagickCore/quantize.h" #include "MagickCore/quantum-private.h" #include "MagickCore/random_.h" #include "MagickCore/random-private.h" #include "MagickCore/resource_.h" #include "MagickCore/segment.h" #include "MagickCore/semaphore.h" #include "MagickCore/signature-private.h" #include "MagickCore/statistic.h" #include "MagickCore/string_.h" #include "MagickCore/thread-private.h" #include "MagickCore/timer.h" #include "MagickCore/utility.h" #include "MagickCore/version.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % E v a l u a t e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % EvaluateImage() applies a value to the image with an arithmetic, relational, % or logical operator to an image. Use these operations to lighten or darken % an image, to increase or decrease contrast in an image, or to produce the % "negative" of an image. 
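%
%  For example, to halve the intensity of every updatable channel
%  (a sketch; error handling elided):
%
%      (void) EvaluateImage(image,MultiplyEvaluateOperator,0.5,exception);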
%
%  The format of the EvaluateImage method is:
%
%      MagickBooleanType EvaluateImage(Image *image,
%        const MagickEvaluateOperator op,const double value,
%        ExceptionInfo *exception)
%      MagickBooleanType EvaluateImages(Image *images,
%        const MagickEvaluateOperator op,const double value,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o op: a channel operator.
%
%    o value: the value to apply to the channel.
%
%    o exception: return any errors or warnings in this structure.
%
*/

typedef struct _PixelChannels
{
  double
    channel[CompositePixelChannel];
} PixelChannels;

static PixelChannels **DestroyPixelThreadSet(PixelChannels **pixels)
{
  register ssize_t
    i;

  assert(pixels != (PixelChannels **) NULL);
  for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
    if (pixels[i] != (PixelChannels *) NULL)
      pixels[i]=(PixelChannels *) RelinquishMagickMemory(pixels[i]);
  pixels=(PixelChannels **) RelinquishMagickMemory(pixels);
  return(pixels);
}

static PixelChannels **AcquirePixelThreadSet(const Image *image)
{
  PixelChannels
    **pixels;

  register ssize_t
    i;

  size_t
    number_threads;

  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  pixels=(PixelChannels **) AcquireQuantumMemory(number_threads,
    sizeof(*pixels));
  if (pixels == (PixelChannels **) NULL)
    return((PixelChannels **) NULL);
  (void) memset(pixels,0,number_threads*sizeof(*pixels));
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    register ssize_t
      j;

    pixels[i]=(PixelChannels *) AcquireQuantumMemory(image->columns,
      sizeof(**pixels));
    if (pixels[i] == (PixelChannels *) NULL)
      return(DestroyPixelThreadSet(pixels));
    for (j=0; j < (ssize_t) image->columns; j++)
    {
      register ssize_t
        k;

      for (k=0; k < MaxPixelChannels; k++)
        pixels[i][j].channel[k]=0.0;
    }
  }
  return(pixels);
}

static inline double EvaluateMax(const double x,const double y)
{
  if (x > y)
    return(x);
  return(y);
}

#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif

static int IntensityCompare(const void *x,const void *y)
{
  const PixelChannels
    *color_1,
    *color_2;

  double
    distance;

  register ssize_t
    i;

  color_1=(const PixelChannels *) x;
  color_2=(const PixelChannels *) y;
  distance=0.0;
  for (i=0; i < MaxPixelChannels; i++)
    distance+=color_1->channel[i]-(double) color_2->channel[i];
  return(distance < 0 ? -1 : distance > 0 ? 1 : 0);
}

#if defined(__cplusplus) || defined(c_plusplus)
}
#endif

static double ApplyEvaluateOperator(RandomInfo *random_info,const Quantum pixel,
  const MagickEvaluateOperator op,const double value)
{
  double
    result;

  result=0.0;
  switch (op)
  {
    case UndefinedEvaluateOperator:
      break;
    case AbsEvaluateOperator:
    {
      result=(double) fabs((double) (pixel+value));
      break;
    }
    case AddEvaluateOperator:
    {
      result=(double) (pixel+value);
      break;
    }
    case AddModulusEvaluateOperator:
    {
      /*
        This returns a 'floored modulus' of the addition, which is always a
        positive result.  It differs from % or fmod(), which return a
        'truncated modulus' (floor() replaced by trunc()) that can be
        negative (and is then clipped).
      */
      result=pixel+value;
      result-=(QuantumRange+1.0)*floor((double) result/(QuantumRange+1.0));
      break;
    }
    case AndEvaluateOperator:
    {
      result=(double) ((size_t) pixel & (size_t) (value+0.5));
      break;
    }
    case CosineEvaluateOperator:
    {
      result=(double) (QuantumRange*(0.5*cos((double) (2.0*MagickPI*
        QuantumScale*pixel*value))+0.5));
      break;
    }
    case DivideEvaluateOperator:
    {
      result=pixel/(value == 0.0 ?
1.0 : value); break; } case ExponentialEvaluateOperator: { result=(double) (QuantumRange*exp((double) (value*QuantumScale*pixel))); break; } case GaussianNoiseEvaluateOperator: { result=(double) GenerateDifferentialNoise(random_info,pixel, GaussianNoise,value); break; } case ImpulseNoiseEvaluateOperator: { result=(double) GenerateDifferentialNoise(random_info,pixel,ImpulseNoise, value); break; } case LaplacianNoiseEvaluateOperator: { result=(double) GenerateDifferentialNoise(random_info,pixel, LaplacianNoise,value); break; } case LeftShiftEvaluateOperator: { result=(double) ((size_t) pixel << (size_t) (value+0.5)); break; } case LogEvaluateOperator: { if ((QuantumScale*pixel) >= MagickEpsilon) result=(double) (QuantumRange*log((double) (QuantumScale*value*pixel+ 1.0))/log((double) (value+1.0))); break; } case MaxEvaluateOperator: { result=(double) EvaluateMax((double) pixel,value); break; } case MeanEvaluateOperator: { result=(double) (pixel+value); break; } case MedianEvaluateOperator: { result=(double) (pixel+value); break; } case MinEvaluateOperator: { result=(double) MagickMin((double) pixel,value); break; } case MultiplicativeNoiseEvaluateOperator: { result=(double) GenerateDifferentialNoise(random_info,pixel, MultiplicativeGaussianNoise,value); break; } case MultiplyEvaluateOperator: { result=(double) (value*pixel); break; } case OrEvaluateOperator: { result=(double) ((size_t) pixel | (size_t) (value+0.5)); break; } case PoissonNoiseEvaluateOperator: { result=(double) GenerateDifferentialNoise(random_info,pixel,PoissonNoise, value); break; } case PowEvaluateOperator: { result=(double) (QuantumRange*pow((double) (QuantumScale*pixel),(double) value)); break; } case RightShiftEvaluateOperator: { result=(double) ((size_t) pixel >> (size_t) (value+0.5)); break; } case RootMeanSquareEvaluateOperator: { result=(double) (pixel*pixel+value); break; } case SetEvaluateOperator: { result=value; break; } case SineEvaluateOperator: { result=(double) (QuantumRange*(0.5*sin((double) (2.0*MagickPI* QuantumScale*pixel*value))+0.5)); break; } case SubtractEvaluateOperator: { result=(double) (pixel-value); break; } case SumEvaluateOperator: { result=(double) (pixel+value); break; } case ThresholdEvaluateOperator: { result=(double) (((double) pixel <= value) ? 0 : QuantumRange); break; } case ThresholdBlackEvaluateOperator: { result=(double) (((double) pixel <= value) ? 0 : pixel); break; } case ThresholdWhiteEvaluateOperator: { result=(double) (((double) pixel > value) ? 
QuantumRange : pixel); break; } case UniformNoiseEvaluateOperator: { result=(double) GenerateDifferentialNoise(random_info,pixel,UniformNoise, value); break; } case XorEvaluateOperator: { result=(double) ((size_t) pixel ^ (size_t) (value+0.5)); break; } } return(result); } static Image *AcquireImageCanvas(const Image *images,ExceptionInfo *exception) { const Image *p, *q; size_t columns, rows; q=images; columns=images->columns; rows=images->rows; for (p=images; p != (Image *) NULL; p=p->next) { if (p->number_channels > q->number_channels) q=p; if (p->columns > columns) columns=p->columns; if (p->rows > rows) rows=p->rows; } return(CloneImage(q,columns,rows,MagickTrue,exception)); } MagickExport Image *EvaluateImages(const Image *images, const MagickEvaluateOperator op,ExceptionInfo *exception) { #define EvaluateImageTag "Evaluate/Image" CacheView *evaluate_view; Image *image; MagickBooleanType status; MagickOffsetType progress; PixelChannels **magick_restrict evaluate_pixels; RandomInfo **magick_restrict random_info; size_t number_images; ssize_t y; #if defined(MAGICKCORE_OPENMP_SUPPORT) unsigned long key; #endif assert(images != (Image *) NULL); assert(images->signature == MagickCoreSignature); if (images->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); image=AcquireImageCanvas(images,exception); if (image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) { image=DestroyImage(image); return((Image *) NULL); } number_images=GetImageListLength(images); evaluate_pixels=AcquirePixelThreadSet(images); if (evaluate_pixels == (PixelChannels **) NULL) { image=DestroyImage(image); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename); return((Image *) NULL); } /* Evaluate image pixels. 
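    The median operator is handled separately below: for each pixel it
    gathers one sample per input image, sorts the samples with
    IntensityCompare(), and keeps the middle one.  Every other operator
    folds the image list into a running per-pixel accumulation that is
    normalized afterwards (e.g. divided by the image count for the mean).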
*/ status=MagickTrue; progress=0; random_info=AcquireRandomInfoThreadSet(); evaluate_view=AcquireAuthenticCacheView(image,exception); if (op == MedianEvaluateOperator) { #if defined(MAGICKCORE_OPENMP_SUPPORT) key=GetRandomSecretKey(random_info[0]); #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,images,image->rows,key == ~0UL) #endif for (y=0; y < (ssize_t) image->rows; y++) { CacheView *image_view; const Image *next; const int id = GetOpenMPThreadId(); register PixelChannels *evaluate_pixel; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(evaluate_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } evaluate_pixel=evaluate_pixels[id]; for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t j, k; for (j=0; j < (ssize_t) number_images; j++) for (k=0; k < MaxPixelChannels; k++) evaluate_pixel[j].channel[k]=0.0; next=images; for (j=0; j < (ssize_t) number_images; j++) { register const Quantum *p; register ssize_t i; image_view=AcquireVirtualCacheView(next,exception); p=GetCacheViewVirtualPixels(image_view,x,y,1,1,exception); if (p == (const Quantum *) NULL) { image_view=DestroyCacheView(image_view); break; } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait evaluate_traits=GetPixelChannelTraits(image,channel); PixelTrait traits = GetPixelChannelTraits(next,channel); if ((traits == UndefinedPixelTrait) || (evaluate_traits == UndefinedPixelTrait)) continue; if ((traits & UpdatePixelTrait) == 0) continue; evaluate_pixel[j].channel[i]=ApplyEvaluateOperator( random_info[id],GetPixelChannel(image,channel,p),op, evaluate_pixel[j].channel[i]); } image_view=DestroyCacheView(image_view); next=GetNextImageInList(next); } qsort((void *) evaluate_pixel,number_images,sizeof(*evaluate_pixel), IntensityCompare); for (k=0; k < (ssize_t) GetPixelChannels(image); k++) q[k]=ClampToQuantum(evaluate_pixel[j/2].channel[k]); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(evaluate_view,exception) == MagickFalse) status=MagickFalse; if (images->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(images,EvaluateImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } } else { #if defined(MAGICKCORE_OPENMP_SUPPORT) key=GetRandomSecretKey(random_info[0]); #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,images,image->rows,key == ~0UL) #endif for (y=0; y < (ssize_t) image->rows; y++) { CacheView *image_view; const Image *next; const int id = GetOpenMPThreadId(); register ssize_t i, x; register PixelChannels *evaluate_pixel; register Quantum *magick_restrict q; ssize_t j; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(evaluate_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } evaluate_pixel=evaluate_pixels[id]; for (j=0; j < (ssize_t) image->columns; j++) for (i=0; i < MaxPixelChannels; i++) evaluate_pixel[j].channel[i]=0.0; next=images; for (j=0; j < (ssize_t) number_images; j++) { register const Quantum *p; image_view=AcquireVirtualCacheView(next,exception); p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1, exception); if (p == (const Quantum *) NULL) { 
image_view=DestroyCacheView(image_view); break; } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(next); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(next,channel); PixelTrait evaluate_traits=GetPixelChannelTraits(image,channel); if ((traits == UndefinedPixelTrait) || (evaluate_traits == UndefinedPixelTrait)) continue; if ((traits & UpdatePixelTrait) == 0) continue; evaluate_pixel[x].channel[i]=ApplyEvaluateOperator( random_info[id],GetPixelChannel(image,channel,p),j == 0 ? AddEvaluateOperator : op,evaluate_pixel[x].channel[i]); } p+=GetPixelChannels(next); } image_view=DestroyCacheView(image_view); next=GetNextImageInList(next); } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; switch (op) { case MeanEvaluateOperator: { for (i=0; i < (ssize_t) GetPixelChannels(image); i++) evaluate_pixel[x].channel[i]/=(double) number_images; break; } case MultiplyEvaluateOperator: { for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { register ssize_t j; for (j=0; j < (ssize_t) (number_images-1); j++) evaluate_pixel[x].channel[i]*=QuantumScale; } break; } case RootMeanSquareEvaluateOperator: { for (i=0; i < (ssize_t) GetPixelChannels(image); i++) evaluate_pixel[x].channel[i]=sqrt(evaluate_pixel[x].channel[i]/ number_images); break; } default: break; } } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if (traits == UndefinedPixelTrait) continue; if ((traits & UpdatePixelTrait) == 0) continue; q[i]=ClampToQuantum(evaluate_pixel[x].channel[i]); } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(evaluate_view,exception) == MagickFalse) status=MagickFalse; if (images->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(images,EvaluateImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } } evaluate_view=DestroyCacheView(evaluate_view); evaluate_pixels=DestroyPixelThreadSet(evaluate_pixels); random_info=DestroyRandomInfoThreadSet(random_info); if (status == MagickFalse) image=DestroyImage(image); return(image); } MagickExport MagickBooleanType EvaluateImage(Image *image, const MagickEvaluateOperator op,const double value,ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; RandomInfo **magick_restrict random_info; ssize_t y; #if defined(MAGICKCORE_OPENMP_SUPPORT) unsigned long key; #endif assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); status=MagickTrue; progress=0; random_info=AcquireRandomInfoThreadSet(); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) key=GetRandomSecretKey(random_info[0]); #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,key == ~0UL) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = 
GetOpenMPThreadId();

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        result;

      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        if ((traits & CopyPixelTrait) != 0)
          continue;
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        result=ApplyEvaluateOperator(random_info[id],q[i],op,value);
        if (op == MeanEvaluateOperator)
          result/=2.0;
        q[i]=ClampToQuantum(result);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,EvaluateImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  random_info=DestroyRandomInfoThreadSet(random_info);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     F u n c t i o n I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  FunctionImage() applies a parameterized function to the image channel
%  values.  Use these operations to lighten or darken an image, to increase
%  or decrease contrast in an image, or to produce the "negative" of an image.
%
%  The format of the FunctionImage method is:
%
%      MagickBooleanType FunctionImage(Image *image,
%        const MagickFunction function,const size_t number_parameters,
%        const double *parameters,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o function: a channel function.
%
%    o parameters: one or more parameters.
%
%    o exception: return any errors or warnings in this structure.
%
*/

static Quantum ApplyFunction(Quantum pixel,const MagickFunction function,
  const size_t number_parameters,const double *parameters,
  ExceptionInfo *exception)
{
  double
    result;

  register ssize_t
    i;

  (void) exception;
  result=0.0;
  switch (function)
  {
    case PolynomialFunction:
    {
      /*
        Polynomial: polynomial constants, highest to lowest order (e.g. c0*x^3+
        c1*x^2+c2*x+c3).
      */
      result=0.0;
      for (i=0; i < (ssize_t) number_parameters; i++)
        result=result*QuantumScale*pixel+parameters[i];
      result*=QuantumRange;
      break;
    }
    case SinusoidFunction:
    {
      double
        amplitude,
        bias,
        frequency,
        phase;

      /*
        Sinusoid: frequency, phase, amplitude, bias.
      */
      frequency=(number_parameters >= 1) ? parameters[0] : 1.0;
      phase=(number_parameters >= 2) ? parameters[1] : 0.0;
      amplitude=(number_parameters >= 3) ? parameters[2] : 0.5;
      bias=(number_parameters >= 4) ? parameters[3] : 0.5;
      result=(double) (QuantumRange*(amplitude*sin((double) (2.0*
        MagickPI*(frequency*QuantumScale*pixel+phase/360.0)))+bias));
      break;
    }
    case ArcsinFunction:
    {
      double
        bias,
        center,
        range,
        width;

      /*
        Arcsin (pegged at range limits for invalid results): width, center,
        range, and bias.
      */
      width=(number_parameters >= 1) ? parameters[0] : 1.0;
      center=(number_parameters >= 2) ? parameters[1] : 0.5;
      range=(number_parameters >= 3) ? parameters[2] : 1.0;
      bias=(number_parameters >= 4) ?
parameters[3] : 0.5; result=2.0/width*(QuantumScale*pixel-center); if ( result <= -1.0 ) result=bias-range/2.0; else if (result >= 1.0) result=bias+range/2.0; else result=(double) (range/MagickPI*asin((double) result)+bias); result*=QuantumRange; break; } case ArctanFunction: { double center, bias, range, slope; /* Arctan: slope, center, range, and bias. */ slope=(number_parameters >= 1) ? parameters[0] : 1.0; center=(number_parameters >= 2) ? parameters[1] : 0.5; range=(number_parameters >= 3) ? parameters[2] : 1.0; bias=(number_parameters >= 4) ? parameters[3] : 0.5; result=(double) (MagickPI*slope*(QuantumScale*pixel-center)); result=(double) (QuantumRange*(range/MagickPI*atan((double) result)+bias)); break; } case UndefinedFunction: break; } return(ClampToQuantum(result)); } MagickExport MagickBooleanType FunctionImage(Image *image, const MagickFunction function,const size_t number_parameters, const double *parameters,ExceptionInfo *exception) { #define FunctionImageTag "Function/Image " CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); #if defined(MAGICKCORE_OPENCL_SUPPORT) if (AccelerateFunctionImage(image,function,number_parameters,parameters, exception) != MagickFalse) return(MagickTrue); #endif if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if (traits == UndefinedPixelTrait) continue; if ((traits & UpdatePixelTrait) == 0) continue; q[i]=ApplyFunction(q[i],function,number_parameters,parameters, exception); } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,FunctionImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e E n t r o p y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageEntropy() returns the entropy of one or more image channels. % % The format of the GetImageEntropy method is: % % MagickBooleanType GetImageEntropy(const Image *image,double *entropy, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. 
% % o entropy: the average entropy of the selected channels. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType GetImageEntropy(const Image *image, double *entropy,ExceptionInfo *exception) { ChannelStatistics *channel_statistics; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); channel_statistics=GetImageStatistics(image,exception); if (channel_statistics == (ChannelStatistics *) NULL) return(MagickFalse); *entropy=channel_statistics[CompositePixelChannel].entropy; channel_statistics=(ChannelStatistics *) RelinquishMagickMemory( channel_statistics); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e E x t r e m a % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageExtrema() returns the extrema of one or more image channels. % % The format of the GetImageExtrema method is: % % MagickBooleanType GetImageExtrema(const Image *image,size_t *minima, % size_t *maxima,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o minima: the minimum value in the channel. % % o maxima: the maximum value in the channel. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType GetImageExtrema(const Image *image, size_t *minima,size_t *maxima,ExceptionInfo *exception) { double max, min; MagickBooleanType status; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); status=GetImageRange(image,&min,&max,exception); *minima=(size_t) ceil(min-0.5); *maxima=(size_t) floor(max+0.5); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e K u r t o s i s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageKurtosis() returns the kurtosis and skewness of one or more image % channels. % % The format of the GetImageKurtosis method is: % % MagickBooleanType GetImageKurtosis(const Image *image,double *kurtosis, % double *skewness,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o kurtosis: the kurtosis of the channel. % % o skewness: the skewness of the channel. % % o exception: return any errors or warnings in this structure. 
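%
%  A sketch of typical use:
%
%      double kurtosis, skewness;
%      (void) GetImageKurtosis(image,&kurtosis,&skewness,exception);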
% */ MagickExport MagickBooleanType GetImageKurtosis(const Image *image, double *kurtosis,double *skewness,ExceptionInfo *exception) { ChannelStatistics *channel_statistics; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); channel_statistics=GetImageStatistics(image,exception); if (channel_statistics == (ChannelStatistics *) NULL) return(MagickFalse); *kurtosis=channel_statistics[CompositePixelChannel].kurtosis; *skewness=channel_statistics[CompositePixelChannel].skewness; channel_statistics=(ChannelStatistics *) RelinquishMagickMemory( channel_statistics); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e M e a n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageMean() returns the mean and standard deviation of one or more image % channels. % % The format of the GetImageMean method is: % % MagickBooleanType GetImageMean(const Image *image,double *mean, % double *standard_deviation,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o mean: the average value in the channel. % % o standard_deviation: the standard deviation of the channel. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType GetImageMean(const Image *image,double *mean, double *standard_deviation,ExceptionInfo *exception) { ChannelStatistics *channel_statistics; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); channel_statistics=GetImageStatistics(image,exception); if (channel_statistics == (ChannelStatistics *) NULL) return(MagickFalse); *mean=channel_statistics[CompositePixelChannel].mean; *standard_deviation= channel_statistics[CompositePixelChannel].standard_deviation; channel_statistics=(ChannelStatistics *) RelinquishMagickMemory( channel_statistics); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e M o m e n t s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageMoments() returns the normalized moments of one or more image % channels. % % The format of the GetImageMoments method is: % % ChannelMoments *GetImageMoments(const Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ static size_t GetImageChannels(const Image *image) { register ssize_t i; size_t channels; channels=0; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if (traits == UndefinedPixelTrait) continue; if ((traits & UpdatePixelTrait) == 0) continue; channels++; } return((size_t) (channels == 0 ? 
1 : channels)); } MagickExport ChannelMoments *GetImageMoments(const Image *image, ExceptionInfo *exception) { #define MaxNumberImageMoments 8 CacheView *image_view; ChannelMoments *channel_moments; double M00[MaxPixelChannels+1], M01[MaxPixelChannels+1], M02[MaxPixelChannels+1], M03[MaxPixelChannels+1], M10[MaxPixelChannels+1], M11[MaxPixelChannels+1], M12[MaxPixelChannels+1], M20[MaxPixelChannels+1], M21[MaxPixelChannels+1], M22[MaxPixelChannels+1], M30[MaxPixelChannels+1]; PointInfo centroid[MaxPixelChannels+1]; ssize_t channel, y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); channel_moments=(ChannelMoments *) AcquireQuantumMemory(MaxPixelChannels+1, sizeof(*channel_moments)); if (channel_moments == (ChannelMoments *) NULL) return(channel_moments); (void) memset(channel_moments,0,(MaxPixelChannels+1)* sizeof(*channel_moments)); (void) memset(centroid,0,sizeof(centroid)); (void) memset(M00,0,sizeof(M00)); (void) memset(M01,0,sizeof(M01)); (void) memset(M02,0,sizeof(M02)); (void) memset(M03,0,sizeof(M03)); (void) memset(M10,0,sizeof(M10)); (void) memset(M11,0,sizeof(M11)); (void) memset(M12,0,sizeof(M12)); (void) memset(M20,0,sizeof(M20)); (void) memset(M21,0,sizeof(M21)); (void) memset(M22,0,sizeof(M22)); (void) memset(M30,0,sizeof(M30)); image_view=AcquireVirtualCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; register ssize_t x; /* Compute center of mass (centroid). */ p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if (traits == UndefinedPixelTrait) continue; if ((traits & UpdatePixelTrait) == 0) continue; M00[channel]+=QuantumScale*p[i]; M00[MaxPixelChannels]+=QuantumScale*p[i]; M10[channel]+=x*QuantumScale*p[i]; M10[MaxPixelChannels]+=x*QuantumScale*p[i]; M01[channel]+=y*QuantumScale*p[i]; M01[MaxPixelChannels]+=y*QuantumScale*p[i]; } p+=GetPixelChannels(image); } } for (channel=0; channel <= MaxPixelChannels; channel++) { /* Compute center of mass (centroid). */ if (M00[channel] < MagickEpsilon) { M00[channel]+=MagickEpsilon; centroid[channel].x=(double) image->columns/2.0; centroid[channel].y=(double) image->rows/2.0; continue; } M00[channel]+=MagickEpsilon; centroid[channel].x=M10[channel]/M00[channel]; centroid[channel].y=M01[channel]/M00[channel]; } for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; register ssize_t x; /* Compute the image moments. 
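        This second pass accumulates the central moments
        mu(p,q) = sum over x,y of (x-cx)^p*(y-cy)^q*I(x,y), where (cx,cy) is
        the per-channel centroid from the first pass and I is the pixel
        intensity scaled to [0,1] by QuantumScale.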
*/ p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if (traits == UndefinedPixelTrait) continue; if ((traits & UpdatePixelTrait) == 0) continue; M11[channel]+=(x-centroid[channel].x)*(y-centroid[channel].y)* QuantumScale*p[i]; M11[MaxPixelChannels]+=(x-centroid[channel].x)*(y-centroid[channel].y)* QuantumScale*p[i]; M20[channel]+=(x-centroid[channel].x)*(x-centroid[channel].x)* QuantumScale*p[i]; M20[MaxPixelChannels]+=(x-centroid[channel].x)*(x-centroid[channel].x)* QuantumScale*p[i]; M02[channel]+=(y-centroid[channel].y)*(y-centroid[channel].y)* QuantumScale*p[i]; M02[MaxPixelChannels]+=(y-centroid[channel].y)*(y-centroid[channel].y)* QuantumScale*p[i]; M21[channel]+=(x-centroid[channel].x)*(x-centroid[channel].x)* (y-centroid[channel].y)*QuantumScale*p[i]; M21[MaxPixelChannels]+=(x-centroid[channel].x)*(x-centroid[channel].x)* (y-centroid[channel].y)*QuantumScale*p[i]; M12[channel]+=(x-centroid[channel].x)*(y-centroid[channel].y)* (y-centroid[channel].y)*QuantumScale*p[i]; M12[MaxPixelChannels]+=(x-centroid[channel].x)*(y-centroid[channel].y)* (y-centroid[channel].y)*QuantumScale*p[i]; M22[channel]+=(x-centroid[channel].x)*(x-centroid[channel].x)* (y-centroid[channel].y)*(y-centroid[channel].y)*QuantumScale*p[i]; M22[MaxPixelChannels]+=(x-centroid[channel].x)*(x-centroid[channel].x)* (y-centroid[channel].y)*(y-centroid[channel].y)*QuantumScale*p[i]; M30[channel]+=(x-centroid[channel].x)*(x-centroid[channel].x)* (x-centroid[channel].x)*QuantumScale*p[i]; M30[MaxPixelChannels]+=(x-centroid[channel].x)*(x-centroid[channel].x)* (x-centroid[channel].x)*QuantumScale*p[i]; M03[channel]+=(y-centroid[channel].y)*(y-centroid[channel].y)* (y-centroid[channel].y)*QuantumScale*p[i]; M03[MaxPixelChannels]+=(y-centroid[channel].y)*(y-centroid[channel].y)* (y-centroid[channel].y)*QuantumScale*p[i]; } p+=GetPixelChannels(image); } } M00[MaxPixelChannels]/=GetImageChannels(image); M01[MaxPixelChannels]/=GetImageChannels(image); M02[MaxPixelChannels]/=GetImageChannels(image); M03[MaxPixelChannels]/=GetImageChannels(image); M10[MaxPixelChannels]/=GetImageChannels(image); M11[MaxPixelChannels]/=GetImageChannels(image); M12[MaxPixelChannels]/=GetImageChannels(image); M20[MaxPixelChannels]/=GetImageChannels(image); M21[MaxPixelChannels]/=GetImageChannels(image); M22[MaxPixelChannels]/=GetImageChannels(image); M30[MaxPixelChannels]/=GetImageChannels(image); for (channel=0; channel <= MaxPixelChannels; channel++) { /* Compute elliptical angle, major and minor axes, eccentricity, & intensity. 
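      The semi-axes follow from the eigenvalues of the second-order moment
      matrix, axis=sqrt((2/mu00)*((mu20+mu02) +/- sqrt(4*mu11^2+
      (mu20-mu02)^2))), and the orientation from 0.5*atan(2*mu11/
      (mu20-mu02)), corrected by quadrant below.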
*/ channel_moments[channel].centroid=centroid[channel]; channel_moments[channel].ellipse_axis.x=sqrt((2.0/M00[channel])* ((M20[channel]+M02[channel])+sqrt(4.0*M11[channel]*M11[channel]+ (M20[channel]-M02[channel])*(M20[channel]-M02[channel])))); channel_moments[channel].ellipse_axis.y=sqrt((2.0/M00[channel])* ((M20[channel]+M02[channel])-sqrt(4.0*M11[channel]*M11[channel]+ (M20[channel]-M02[channel])*(M20[channel]-M02[channel])))); channel_moments[channel].ellipse_angle=RadiansToDegrees(0.5*atan(2.0* M11[channel]/(M20[channel]-M02[channel]+MagickEpsilon))); if (fabs(M11[channel]) < MagickEpsilon) { if (fabs(M20[channel]-M02[channel]) < MagickEpsilon) channel_moments[channel].ellipse_angle+=0.0; else if ((M20[channel]-M02[channel]) < 0.0) channel_moments[channel].ellipse_angle+=90.0; else channel_moments[channel].ellipse_angle+=0.0; } else if (M11[channel] < 0.0) { if (fabs(M20[channel]-M02[channel]) < MagickEpsilon) channel_moments[channel].ellipse_angle+=0.0; else if ((M20[channel]-M02[channel]) < 0.0) channel_moments[channel].ellipse_angle+=90.0; else channel_moments[channel].ellipse_angle+=180.0; } else { if (fabs(M20[channel]-M02[channel]) < MagickEpsilon) channel_moments[channel].ellipse_angle+=0.0; else if ((M20[channel]-M02[channel]) < 0.0) channel_moments[channel].ellipse_angle+=90.0; else channel_moments[channel].ellipse_angle+=0.0; } channel_moments[channel].ellipse_eccentricity=sqrt(1.0-( channel_moments[channel].ellipse_axis.y/ (channel_moments[channel].ellipse_axis.x+MagickEpsilon))); channel_moments[channel].ellipse_intensity=M00[channel]/ (MagickPI*channel_moments[channel].ellipse_axis.x* channel_moments[channel].ellipse_axis.y+MagickEpsilon); } for (channel=0; channel <= MaxPixelChannels; channel++) { /* Normalize image moments. */ M10[channel]=0.0; M01[channel]=0.0; M11[channel]/=pow(M00[channel],1.0+(1.0+1.0)/2.0); M20[channel]/=pow(M00[channel],1.0+(2.0+0.0)/2.0); M02[channel]/=pow(M00[channel],1.0+(0.0+2.0)/2.0); M21[channel]/=pow(M00[channel],1.0+(2.0+1.0)/2.0); M12[channel]/=pow(M00[channel],1.0+(1.0+2.0)/2.0); M22[channel]/=pow(M00[channel],1.0+(2.0+2.0)/2.0); M30[channel]/=pow(M00[channel],1.0+(3.0+0.0)/2.0); M03[channel]/=pow(M00[channel],1.0+(0.0+3.0)/2.0); M00[channel]=1.0; } image_view=DestroyCacheView(image_view); for (channel=0; channel <= MaxPixelChannels; channel++) { /* Compute Hu invariant moments. 
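      The moments were normalized above as eta(p,q)=mu(p,q)/mu00^(1+(p+q)/2),
      so the quantities below are invariant to scale as well as translation
      and rotation; invariant[0..6] are Hu's classic seven invariant moments
      and invariant[7] is a commonly used eighth (skew) invariant.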
*/ channel_moments[channel].invariant[0]=M20[channel]+M02[channel]; channel_moments[channel].invariant[1]=(M20[channel]-M02[channel])* (M20[channel]-M02[channel])+4.0*M11[channel]*M11[channel]; channel_moments[channel].invariant[2]=(M30[channel]-3.0*M12[channel])* (M30[channel]-3.0*M12[channel])+(3.0*M21[channel]-M03[channel])* (3.0*M21[channel]-M03[channel]); channel_moments[channel].invariant[3]=(M30[channel]+M12[channel])* (M30[channel]+M12[channel])+(M21[channel]+M03[channel])* (M21[channel]+M03[channel]); channel_moments[channel].invariant[4]=(M30[channel]-3.0*M12[channel])* (M30[channel]+M12[channel])*((M30[channel]+M12[channel])* (M30[channel]+M12[channel])-3.0*(M21[channel]+M03[channel])* (M21[channel]+M03[channel]))+(3.0*M21[channel]-M03[channel])* (M21[channel]+M03[channel])*(3.0*(M30[channel]+M12[channel])* (M30[channel]+M12[channel])-(M21[channel]+M03[channel])* (M21[channel]+M03[channel])); channel_moments[channel].invariant[5]=(M20[channel]-M02[channel])* ((M30[channel]+M12[channel])*(M30[channel]+M12[channel])- (M21[channel]+M03[channel])*(M21[channel]+M03[channel]))+ 4.0*M11[channel]*(M30[channel]+M12[channel])*(M21[channel]+M03[channel]); channel_moments[channel].invariant[6]=(3.0*M21[channel]-M03[channel])* (M30[channel]+M12[channel])*((M30[channel]+M12[channel])* (M30[channel]+M12[channel])-3.0*(M21[channel]+M03[channel])* (M21[channel]+M03[channel]))-(M30[channel]-3*M12[channel])* (M21[channel]+M03[channel])*(3.0*(M30[channel]+M12[channel])* (M30[channel]+M12[channel])-(M21[channel]+M03[channel])* (M21[channel]+M03[channel])); channel_moments[channel].invariant[7]=M11[channel]*((M30[channel]+ M12[channel])*(M30[channel]+M12[channel])-(M03[channel]+M21[channel])* (M03[channel]+M21[channel]))-(M20[channel]-M02[channel])* (M30[channel]+M12[channel])*(M03[channel]+M21[channel]); } if (y < (ssize_t) image->rows) channel_moments=(ChannelMoments *) RelinquishMagickMemory(channel_moments); return(channel_moments); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e C h a n n e l P e r c e p t u a l H a s h % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImagePerceptualHash() returns the perceptual hash of one or more % image channels. % % The format of the GetImagePerceptualHash method is: % % ChannelPerceptualHash *GetImagePerceptualHash(const Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
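%
%  A sketch of typical use (by default the hash is computed in the sRGB and
%  HCLp colorspaces; set the "phash:colorspaces" artifact to override):
%
%      perceptual_hash=GetImagePerceptualHash(image,exception);
%      if (perceptual_hash != (ChannelPerceptualHash *) NULL)
%        { /* compare phash[][] values, then free the buffer */ }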
% */ static inline double MagickLog10(const double x) { #define Log10Epsilon (1.0e-11) if (fabs(x) < Log10Epsilon) return(log10(Log10Epsilon)); return(log10(fabs(x))); } MagickExport ChannelPerceptualHash *GetImagePerceptualHash(const Image *image, ExceptionInfo *exception) { ChannelPerceptualHash *perceptual_hash; char *colorspaces, *q; const char *artifact; MagickBooleanType status; register char *p; register ssize_t i; perceptual_hash=(ChannelPerceptualHash *) AcquireQuantumMemory( MaxPixelChannels+1UL,sizeof(*perceptual_hash)); if (perceptual_hash == (ChannelPerceptualHash *) NULL) return((ChannelPerceptualHash *) NULL); artifact=GetImageArtifact(image,"phash:colorspaces"); if (artifact != NULL) colorspaces=AcquireString(artifact); else colorspaces=AcquireString("sRGB,HCLp"); perceptual_hash[0].number_colorspaces=0; perceptual_hash[0].number_channels=0; q=colorspaces; for (i=0; (p=StringToken(",",&q)) != (char *) NULL; i++) { ChannelMoments *moments; Image *hash_image; size_t j; ssize_t channel, colorspace; if (i >= MaximumNumberOfPerceptualColorspaces) break; colorspace=ParseCommandOption(MagickColorspaceOptions,MagickFalse,p); if (colorspace < 0) break; perceptual_hash[0].colorspace[i]=(ColorspaceType) colorspace; hash_image=BlurImage(image,0.0,1.0,exception); if (hash_image == (Image *) NULL) break; hash_image->depth=8; status=TransformImageColorspace(hash_image,(ColorspaceType) colorspace, exception); if (status == MagickFalse) break; moments=GetImageMoments(hash_image,exception); perceptual_hash[0].number_colorspaces++; perceptual_hash[0].number_channels+=GetImageChannels(hash_image); hash_image=DestroyImage(hash_image); if (moments == (ChannelMoments *) NULL) break; for (channel=0; channel <= MaxPixelChannels; channel++) for (j=0; j < MaximumNumberOfImageMoments; j++) perceptual_hash[channel].phash[i][j]= (-MagickLog10(moments[channel].invariant[j])); moments=(ChannelMoments *) RelinquishMagickMemory(moments); } colorspaces=DestroyString(colorspaces); return(perceptual_hash); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e R a n g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageRange() returns the range of one or more image channels. % % The format of the GetImageRange method is: % % MagickBooleanType GetImageRange(const Image *image,double *minima, % double *maxima,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o minima: the minimum value in the channel. % % o maxima: the maximum value in the channel. % % o exception: return any errors or warnings in this structure. 
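%
%  A sketch of typical use:
%
%      double minima, maxima;
%      (void) GetImageRange(image,&minima,&maxima,exception);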
% */ MagickExport MagickBooleanType GetImageRange(const Image *image,double *minima, double *maxima,ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType initialize, status; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); status=MagickTrue; initialize=MagickTrue; *maxima=0.0; *minima=0.0; image_view=AcquireVirtualCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status,initialize) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { double row_maxima = 0.0, row_minima = 0.0; MagickBooleanType row_initialize; register const Quantum *magick_restrict p; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; continue; } row_initialize=MagickTrue; for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if (traits == UndefinedPixelTrait) continue; if ((traits & UpdatePixelTrait) == 0) continue; if (row_initialize != MagickFalse) { row_minima=(double) p[i]; row_maxima=(double) p[i]; row_initialize=MagickFalse; } else { if ((double) p[i] < row_minima) row_minima=(double) p[i]; if ((double) p[i] > row_maxima) row_maxima=(double) p[i]; } } p+=GetPixelChannels(image); } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_GetImageRange) #endif { if (initialize != MagickFalse) { *minima=row_minima; *maxima=row_maxima; initialize=MagickFalse; } else { if (row_minima < *minima) *minima=row_minima; if (row_maxima > *maxima) *maxima=row_maxima; } } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e S t a t i s t i c s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageStatistics() returns statistics for each channel in the image. The % statistics include the channel depth, its minima, maxima, mean, standard % deviation, kurtosis and skewness. You can access the red channel mean, for % example, like this: % % channel_statistics=GetImageStatistics(image,exception); % red_mean=channel_statistics[RedPixelChannel].mean; % % Use MagickRelinquishMemory() to free the statistics buffer. % % The format of the GetImageStatistics method is: % % ChannelStatistics *GetImageStatistics(const Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
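%
%  The aggregate over all updated channels is stored at index
%  CompositePixelChannel, for example:
%
%      overall_mean=channel_statistics[CompositePixelChannel].mean;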
% */ MagickExport ChannelStatistics *GetImageStatistics(const Image *image, ExceptionInfo *exception) { ChannelStatistics *channel_statistics; double area, *histogram, standard_deviation; MagickStatusType status; QuantumAny range; register ssize_t i; size_t depth; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); histogram=(double *) AcquireQuantumMemory(MaxMap+1UL,GetPixelChannels(image)* sizeof(*histogram)); channel_statistics=(ChannelStatistics *) AcquireQuantumMemory( MaxPixelChannels+1,sizeof(*channel_statistics)); if ((channel_statistics == (ChannelStatistics *) NULL) || (histogram == (double *) NULL)) { if (histogram != (double *) NULL) histogram=(double *) RelinquishMagickMemory(histogram); if (channel_statistics != (ChannelStatistics *) NULL) channel_statistics=(ChannelStatistics *) RelinquishMagickMemory( channel_statistics); return(channel_statistics); } (void) memset(channel_statistics,0,(MaxPixelChannels+1)* sizeof(*channel_statistics)); for (i=0; i <= (ssize_t) MaxPixelChannels; i++) { channel_statistics[i].depth=1; channel_statistics[i].maxima=(-MagickMaximumValue); channel_statistics[i].minima=MagickMaximumValue; } (void) memset(histogram,0,(MaxMap+1)*GetPixelChannels(image)* sizeof(*histogram)); for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; register ssize_t x; /* Compute pixel statistics. */ p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; if (GetPixelReadMask(image,p) <= (QuantumRange/2)) { p+=GetPixelChannels(image); continue; } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if (traits == UndefinedPixelTrait) continue; if ((traits & UpdatePixelTrait) == 0) continue; if (channel_statistics[channel].depth != MAGICKCORE_QUANTUM_DEPTH) { depth=channel_statistics[channel].depth; range=GetQuantumRange(depth); status=p[i] != ScaleAnyToQuantum(ScaleQuantumToAny(p[i],range), range) ? 
MagickTrue : MagickFalse; if (status != MagickFalse) { channel_statistics[channel].depth++; i--; continue; } } if ((double) p[i] < channel_statistics[channel].minima) channel_statistics[channel].minima=(double) p[i]; if ((double) p[i] > channel_statistics[channel].maxima) channel_statistics[channel].maxima=(double) p[i]; channel_statistics[channel].sum+=p[i]; channel_statistics[channel].sum_squared+=(double) p[i]*p[i]; channel_statistics[channel].sum_cubed+=(double) p[i]*p[i]*p[i]; channel_statistics[channel].sum_fourth_power+=(double) p[i]*p[i]*p[i]* p[i]; channel_statistics[channel].area++; if ((double) p[i] < channel_statistics[CompositePixelChannel].minima) channel_statistics[CompositePixelChannel].minima=(double) p[i]; if ((double) p[i] > channel_statistics[CompositePixelChannel].maxima) channel_statistics[CompositePixelChannel].maxima=(double) p[i]; histogram[GetPixelChannels(image)*ScaleQuantumToMap( ClampToQuantum((double) p[i]))+i]++; channel_statistics[CompositePixelChannel].sum+=(double) p[i]; channel_statistics[CompositePixelChannel].sum_squared+=(double) p[i]*p[i]; channel_statistics[CompositePixelChannel].sum_cubed+=(double) p[i]*p[i]*p[i]; channel_statistics[CompositePixelChannel].sum_fourth_power+=(double) p[i]*p[i]*p[i]*p[i]; channel_statistics[CompositePixelChannel].area++; } p+=GetPixelChannels(image); } } for (i=0; i <= (ssize_t) MaxPixelChannels; i++) { /* Normalize pixel statistics. */ area=PerceptibleReciprocal(channel_statistics[i].area); channel_statistics[i].sum*=area; channel_statistics[i].sum_squared*=area; channel_statistics[i].sum_cubed*=area; channel_statistics[i].sum_fourth_power*=area; channel_statistics[i].mean=channel_statistics[i].sum; channel_statistics[i].variance=channel_statistics[i].sum_squared; standard_deviation=sqrt(channel_statistics[i].variance- (channel_statistics[i].mean*channel_statistics[i].mean)); standard_deviation=sqrt(PerceptibleReciprocal(channel_statistics[i].area- 1.0)*channel_statistics[i].area*standard_deviation*standard_deviation); channel_statistics[i].standard_deviation=standard_deviation; } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double number_bins; register ssize_t j; /* Compute pixel entropy. */ PixelChannel channel = GetPixelChannelChannel(image,i); number_bins=0.0; for (j=0; j <= (ssize_t) MaxMap; j++) if (histogram[GetPixelChannels(image)*j+i] > 0.0) number_bins++; area=PerceptibleReciprocal(channel_statistics[channel].area); for (j=0; j <= (ssize_t) MaxMap; j++) { double count; count=area*histogram[GetPixelChannels(image)*j+i]; channel_statistics[channel].entropy+=-count*MagickLog10(count)* PerceptibleReciprocal(MagickLog10(number_bins)); channel_statistics[CompositePixelChannel].entropy+=-count* MagickLog10(count)*PerceptibleReciprocal(MagickLog10(number_bins))/ GetPixelChannels(image); } } histogram=(double *) RelinquishMagickMemory(histogram); for (i=0; i <= (ssize_t) MaxPixelChannels; i++) { /* Compute kurtosis & skewness statistics. 
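  Given the normalized raw moments m1=E[X], m2=E[X^2], m3=E[X^3], and
  m4=E[X^4] accumulated above, and sigma the standard deviation:

    skewness=(m3-3.0*m1*m2+2.0*m1^3)/sigma^3
    kurtosis=(m4-4.0*m1*m3+6.0*m1^2*m2-3.0*m1^4)/sigma^4-3.0  (excess form)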
*/ standard_deviation=PerceptibleReciprocal( channel_statistics[i].standard_deviation); channel_statistics[i].skewness=(channel_statistics[i].sum_cubed-3.0* channel_statistics[i].mean*channel_statistics[i].sum_squared+2.0* channel_statistics[i].mean*channel_statistics[i].mean* channel_statistics[i].mean)*(standard_deviation*standard_deviation* standard_deviation); channel_statistics[i].kurtosis=(channel_statistics[i].sum_fourth_power-4.0* channel_statistics[i].mean*channel_statistics[i].sum_cubed+6.0* channel_statistics[i].mean*channel_statistics[i].mean* channel_statistics[i].sum_squared-3.0*channel_statistics[i].mean* channel_statistics[i].mean*1.0*channel_statistics[i].mean* channel_statistics[i].mean)*(standard_deviation*standard_deviation* standard_deviation*standard_deviation)-3.0; } channel_statistics[CompositePixelChannel].mean=0.0; channel_statistics[CompositePixelChannel].standard_deviation=0.0; channel_statistics[CompositePixelChannel].entropy=0.0; for (i=0; i < (ssize_t) MaxPixelChannels; i++) { channel_statistics[CompositePixelChannel].mean+= channel_statistics[i].mean; channel_statistics[CompositePixelChannel].standard_deviation+= channel_statistics[i].standard_deviation; channel_statistics[CompositePixelChannel].entropy+= channel_statistics[i].entropy; } channel_statistics[CompositePixelChannel].mean/=(double) GetImageChannels(image); channel_statistics[CompositePixelChannel].standard_deviation/=(double) GetImageChannels(image); channel_statistics[CompositePixelChannel].entropy/=(double) GetImageChannels(image); if (y < (ssize_t) image->rows) channel_statistics=(ChannelStatistics *) RelinquishMagickMemory( channel_statistics); return(channel_statistics); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P o l y n o m i a l I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PolynomialImage() returns a new image where each pixel is the sum of the % pixels in the image sequence after applying its corresponding terms % (coefficient and degree pairs). % % The format of the PolynomialImage method is: % % Image *PolynomialImage(const Image *images,const size_t number_terms, % const double *terms,ExceptionInfo *exception) % % A description of each parameter follows: % % o images: the image sequence. % % o number_terms: the number of terms in the list. The actual list length % is 2 x number_terms + 1 (the constant). % % o terms: the list of polynomial coefficients and degree pairs and a % constant. % % o exception: return any errors or warnings in this structure. 
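%
%  A minimal usage sketch (values illustrative): compute 0.5*u+0.5*v*v over
%  a two-image sequence u,v; each coefficient,degree pair applies to the
%  image at the same position in the sequence:
%
%      double
%        terms[] = { 0.5, 1.0, 0.5, 2.0, 0.0 };
%
%      polynomial_image=PolynomialImage(images,2,terms,exception);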
% */ MagickExport Image *PolynomialImage(const Image *images, const size_t number_terms,const double *terms,ExceptionInfo *exception) { #define PolynomialImageTag "Polynomial/Image" CacheView *polynomial_view; Image *image; MagickBooleanType status; MagickOffsetType progress; PixelChannels **magick_restrict polynomial_pixels; size_t number_images; ssize_t y; assert(images != (Image *) NULL); assert(images->signature == MagickCoreSignature); if (images->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); image=AcquireImageCanvas(images,exception); if (image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) { image=DestroyImage(image); return((Image *) NULL); } number_images=GetImageListLength(images); polynomial_pixels=AcquirePixelThreadSet(images); if (polynomial_pixels == (PixelChannels **) NULL) { image=DestroyImage(image); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename); return((Image *) NULL); } /* Polynomial image pixels. */ status=MagickTrue; progress=0; polynomial_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { CacheView *image_view; const Image *next; const int id = GetOpenMPThreadId(); register ssize_t i, x; register PixelChannels *polynomial_pixel; register Quantum *magick_restrict q; ssize_t j; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(polynomial_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } polynomial_pixel=polynomial_pixels[id]; for (j=0; j < (ssize_t) image->columns; j++) for (i=0; i < MaxPixelChannels; i++) polynomial_pixel[j].channel[i]=0.0; next=images; for (j=0; j < (ssize_t) number_images; j++) { register const Quantum *p; if (j >= (ssize_t) number_terms) continue; image_view=AcquireVirtualCacheView(next,exception); p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) { image_view=DestroyCacheView(image_view); break; } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(next); i++) { MagickRealType coefficient, degree; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(next,channel); PixelTrait polynomial_traits=GetPixelChannelTraits(image,channel); if ((traits == UndefinedPixelTrait) || (polynomial_traits == UndefinedPixelTrait)) continue; if ((traits & UpdatePixelTrait) == 0) continue; coefficient=(MagickRealType) terms[2*j]; degree=(MagickRealType) terms[(j << 1)+1]; polynomial_pixel[x].channel[i]+=coefficient* pow(QuantumScale*GetPixelChannel(image,channel,p),degree); } p+=GetPixelChannels(next); } image_view=DestroyCacheView(image_view); next=GetNextImageInList(next); } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if (traits == UndefinedPixelTrait) continue; if ((traits & UpdatePixelTrait) == 0) continue; 
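/*
  The polynomial terms were accumulated on QuantumScale-normalized pixel
  values, so rescale the sum to the quantum range and clamp.
*/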
q[i]=ClampToQuantum(QuantumRange*polynomial_pixel[x].channel[i]); } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(polynomial_view,exception) == MagickFalse) status=MagickFalse; if (images->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(images,PolynomialImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } polynomial_view=DestroyCacheView(polynomial_view); polynomial_pixels=DestroyPixelThreadSet(polynomial_pixels); if (status == MagickFalse) image=DestroyImage(image); return(image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S t a t i s t i c I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % StatisticImage() makes each pixel the min / max / median / mode / etc. of % the neighborhood of the specified width and height. % % The format of the StatisticImage method is: % % Image *StatisticImage(const Image *image,const StatisticType type, % const size_t width,const size_t height,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o type: the statistic type (median, mode, etc.). % % o width: the width of the pixel neighborhood. % % o height: the height of the pixel neighborhood. % % o exception: return any errors or warnings in this structure. % */ typedef struct _SkipNode { size_t next[9], count, signature; } SkipNode; typedef struct _SkipList { ssize_t level; SkipNode *nodes; } SkipList; typedef struct _PixelList { size_t length, seed; SkipList skip_list; size_t signature; } PixelList; static PixelList *DestroyPixelList(PixelList *pixel_list) { if (pixel_list == (PixelList *) NULL) return((PixelList *) NULL); if (pixel_list->skip_list.nodes != (SkipNode *) NULL) pixel_list->skip_list.nodes=(SkipNode *) RelinquishAlignedMemory( pixel_list->skip_list.nodes); pixel_list=(PixelList *) RelinquishMagickMemory(pixel_list); return(pixel_list); } static PixelList **DestroyPixelListThreadSet(PixelList **pixel_list) { register ssize_t i; assert(pixel_list != (PixelList **) NULL); for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++) if (pixel_list[i] != (PixelList *) NULL) pixel_list[i]=DestroyPixelList(pixel_list[i]); pixel_list=(PixelList **) RelinquishMagickMemory(pixel_list); return(pixel_list); } static PixelList *AcquirePixelList(const size_t width,const size_t height) { PixelList *pixel_list; pixel_list=(PixelList *) AcquireMagickMemory(sizeof(*pixel_list)); if (pixel_list == (PixelList *) NULL) return(pixel_list); (void) memset((void *) pixel_list,0,sizeof(*pixel_list)); pixel_list->length=width*height; pixel_list->skip_list.nodes=(SkipNode *) AcquireAlignedMemory(65537UL, sizeof(*pixel_list->skip_list.nodes)); if (pixel_list->skip_list.nodes == (SkipNode *) NULL) return(DestroyPixelList(pixel_list)); (void) memset(pixel_list->skip_list.nodes,0,65537UL* sizeof(*pixel_list->skip_list.nodes)); pixel_list->signature=MagickCoreSignature; return(pixel_list); } static PixelList **AcquirePixelListThreadSet(const size_t width, const size_t height) { PixelList **pixel_list; register ssize_t i; size_t number_threads; number_threads=(size_t) GetMagickResourceLimit(ThreadResource); pixel_list=(PixelList **) AcquireQuantumMemory(number_threads, sizeof(*pixel_list)); if (pixel_list == (PixelList **) NULL) return((PixelList **) NULL); (void) 
memset(pixel_list,0,number_threads*sizeof(*pixel_list)); for (i=0; i < (ssize_t) number_threads; i++) { pixel_list[i]=AcquirePixelList(width,height); if (pixel_list[i] == (PixelList *) NULL) return(DestroyPixelListThreadSet(pixel_list)); } return(pixel_list); } static void AddNodePixelList(PixelList *pixel_list,const size_t color) { register SkipList *p; register ssize_t level; size_t search, update[9]; /* Initialize the node. */ p=(&pixel_list->skip_list); p->nodes[color].signature=pixel_list->signature; p->nodes[color].count=1; /* Determine where it belongs in the list. */ search=65536UL; for (level=p->level; level >= 0; level--) { while (p->nodes[search].next[level] < color) search=p->nodes[search].next[level]; update[level]=search; } /* Generate a pseudo-random level for this node. */ for (level=0; ; level++) { pixel_list->seed=(pixel_list->seed*42893621L)+1L; if ((pixel_list->seed & 0x300) != 0x300) break; } if (level > 8) level=8; if (level > (p->level+2)) level=p->level+2; /* If we're raising the list's level, link back to the root node. */ while (level > p->level) { p->level++; update[p->level]=65536UL; } /* Link the node into the skip-list. */ do { p->nodes[color].next[level]=p->nodes[update[level]].next[level]; p->nodes[update[level]].next[level]=color; } while (level-- > 0); } static inline void GetMaximumPixelList(PixelList *pixel_list,Quantum *pixel) { register SkipList *p; size_t color, maximum; ssize_t count; /* Find the maximum value for each of the color. */ p=(&pixel_list->skip_list); color=65536L; count=0; maximum=p->nodes[color].next[0]; do { color=p->nodes[color].next[0]; if (color > maximum) maximum=color; count+=p->nodes[color].count; } while (count < (ssize_t) pixel_list->length); *pixel=ScaleShortToQuantum((unsigned short) maximum); } static inline void GetMeanPixelList(PixelList *pixel_list,Quantum *pixel) { double sum; register SkipList *p; size_t color; ssize_t count; /* Find the mean value for each of the color. */ p=(&pixel_list->skip_list); color=65536L; count=0; sum=0.0; do { color=p->nodes[color].next[0]; sum+=(double) p->nodes[color].count*color; count+=p->nodes[color].count; } while (count < (ssize_t) pixel_list->length); sum/=pixel_list->length; *pixel=ScaleShortToQuantum((unsigned short) sum); } static inline void GetMedianPixelList(PixelList *pixel_list,Quantum *pixel) { register SkipList *p; size_t color; ssize_t count; /* Find the median value for each of the color. */ p=(&pixel_list->skip_list); color=65536L; count=0; do { color=p->nodes[color].next[0]; count+=p->nodes[color].count; } while (count <= (ssize_t) (pixel_list->length >> 1)); *pixel=ScaleShortToQuantum((unsigned short) color); } static inline void GetMinimumPixelList(PixelList *pixel_list,Quantum *pixel) { register SkipList *p; size_t color, minimum; ssize_t count; /* Find the minimum value for each of the color. */ p=(&pixel_list->skip_list); count=0; color=65536UL; minimum=p->nodes[color].next[0]; do { color=p->nodes[color].next[0]; if (color < minimum) minimum=color; count+=p->nodes[color].count; } while (count < (ssize_t) pixel_list->length); *pixel=ScaleShortToQuantum((unsigned short) minimum); } static inline void GetModePixelList(PixelList *pixel_list,Quantum *pixel) { register SkipList *p; size_t color, max_count, mode; ssize_t count; /* Make each pixel the 'predominant color' of the specified neighborhood. 
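  That is, the mode: walk the skip-list in ascending color order and keep
  the color with the largest occurrence count.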
*/ p=(&pixel_list->skip_list); color=65536L; mode=color; max_count=p->nodes[mode].count; count=0; do { color=p->nodes[color].next[0]; if (p->nodes[color].count > max_count) { mode=color; max_count=p->nodes[mode].count; } count+=p->nodes[color].count; } while (count < (ssize_t) pixel_list->length); *pixel=ScaleShortToQuantum((unsigned short) mode); } static inline void GetNonpeakPixelList(PixelList *pixel_list,Quantum *pixel) { register SkipList *p; size_t color, next, previous; ssize_t count; /* Finds the non peak value for each of the colors. */ p=(&pixel_list->skip_list); color=65536L; next=p->nodes[color].next[0]; count=0; do { previous=color; color=next; next=p->nodes[color].next[0]; count+=p->nodes[color].count; } while (count <= (ssize_t) (pixel_list->length >> 1)); if ((previous == 65536UL) && (next != 65536UL)) color=next; else if ((previous != 65536UL) && (next == 65536UL)) color=previous; *pixel=ScaleShortToQuantum((unsigned short) color); } static inline void GetRootMeanSquarePixelList(PixelList *pixel_list, Quantum *pixel) { double sum; register SkipList *p; size_t color; ssize_t count; /* Find the root mean square value for each of the color. */ p=(&pixel_list->skip_list); color=65536L; count=0; sum=0.0; do { color=p->nodes[color].next[0]; sum+=(double) (p->nodes[color].count*color*color); count+=p->nodes[color].count; } while (count < (ssize_t) pixel_list->length); sum/=pixel_list->length; *pixel=ScaleShortToQuantum((unsigned short) sqrt(sum)); } static inline void GetStandardDeviationPixelList(PixelList *pixel_list, Quantum *pixel) { double sum, sum_squared; register SkipList *p; size_t color; ssize_t count; /* Find the standard-deviation value for each of the color. */ p=(&pixel_list->skip_list); color=65536L; count=0; sum=0.0; sum_squared=0.0; do { register ssize_t i; color=p->nodes[color].next[0]; sum+=(double) p->nodes[color].count*color; for (i=0; i < (ssize_t) p->nodes[color].count; i++) sum_squared+=((double) color)*((double) color); count+=p->nodes[color].count; } while (count < (ssize_t) pixel_list->length); sum/=pixel_list->length; sum_squared/=pixel_list->length; *pixel=ScaleShortToQuantum((unsigned short) sqrt(sum_squared-(sum*sum))); } static inline void InsertPixelList(const Quantum pixel,PixelList *pixel_list) { size_t signature; unsigned short index; index=ScaleQuantumToShort(pixel); signature=pixel_list->skip_list.nodes[index].signature; if (signature == pixel_list->signature) { pixel_list->skip_list.nodes[index].count++; return; } AddNodePixelList(pixel_list,index); } static void ResetPixelList(PixelList *pixel_list) { int level; register SkipNode *root; register SkipList *p; /* Reset the skip-list. */ p=(&pixel_list->skip_list); root=p->nodes+65536UL; p->level=0; for (level=0; level < 9; level++) root->next[level]=65536UL; pixel_list->seed=pixel_list->signature++; } MagickExport Image *StatisticImage(const Image *image,const StatisticType type, const size_t width,const size_t height,ExceptionInfo *exception) { #define StatisticImageTag "Statistic/Image" CacheView *image_view, *statistic_view; Image *statistic_image; MagickBooleanType status; MagickOffsetType progress; PixelList **magick_restrict pixel_list; ssize_t center, y; /* Initialize statistics image attributes. 
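  Clone the input, force DirectClass so the pixels are writable, and
  allocate one pixel list per worker thread.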
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); statistic_image=CloneImage(image,0,0,MagickTrue, exception); if (statistic_image == (Image *) NULL) return((Image *) NULL); status=SetImageStorageClass(statistic_image,DirectClass,exception); if (status == MagickFalse) { statistic_image=DestroyImage(statistic_image); return((Image *) NULL); } pixel_list=AcquirePixelListThreadSet(MagickMax(width,1),MagickMax(height,1)); if (pixel_list == (PixelList **) NULL) { statistic_image=DestroyImage(statistic_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } /* Make each pixel the min / max / median / mode / etc. of the neighborhood. */ center=(ssize_t) GetPixelChannels(image)*(image->columns+MagickMax(width,1))* (MagickMax(height,1)/2L)+GetPixelChannels(image)*(MagickMax(width,1)/2L); status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); statistic_view=AcquireAuthenticCacheView(statistic_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,statistic_image,statistic_image->rows,1) #endif for (y=0; y < (ssize_t) statistic_image->rows; y++) { const int id = GetOpenMPThreadId(); register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-((ssize_t) MagickMax(width,1)/2L),y- (ssize_t) (MagickMax(height,1)/2L),image->columns+MagickMax(width,1), MagickMax(height,1),exception); q=QueueCacheViewAuthenticPixels(statistic_view,0,y,statistic_image->columns, 1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) statistic_image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { Quantum pixel; register const Quantum *magick_restrict pixels; register ssize_t u; ssize_t v; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait statistic_traits=GetPixelChannelTraits(statistic_image, channel); if ((traits == UndefinedPixelTrait) || (statistic_traits == UndefinedPixelTrait)) continue; if (((statistic_traits & CopyPixelTrait) != 0) || (GetPixelWriteMask(image,p) <= (QuantumRange/2))) { SetPixelChannel(statistic_image,channel,p[center+i],q); continue; } if ((statistic_traits & UpdatePixelTrait) == 0) continue; pixels=p; ResetPixelList(pixel_list[id]); for (v=0; v < (ssize_t) MagickMax(height,1); v++) { for (u=0; u < (ssize_t) MagickMax(width,1); u++) { InsertPixelList(pixels[i],pixel_list[id]); pixels+=GetPixelChannels(image); } pixels+=GetPixelChannels(image)*image->columns; } switch (type) { case GradientStatistic: { double maximum, minimum; GetMinimumPixelList(pixel_list[id],&pixel); minimum=(double) pixel; GetMaximumPixelList(pixel_list[id],&pixel); maximum=(double) pixel; pixel=ClampToQuantum(MagickAbsoluteValue(maximum-minimum)); break; } case MaximumStatistic: { GetMaximumPixelList(pixel_list[id],&pixel); break; } case MeanStatistic: { GetMeanPixelList(pixel_list[id],&pixel); break; } case MedianStatistic: default: { GetMedianPixelList(pixel_list[id],&pixel); break; } case MinimumStatistic: { 
GetMinimumPixelList(pixel_list[id],&pixel); break; } case ModeStatistic: { GetModePixelList(pixel_list[id],&pixel); break; } case NonpeakStatistic: { GetNonpeakPixelList(pixel_list[id],&pixel); break; } case RootMeanSquareStatistic: { GetRootMeanSquarePixelList(pixel_list[id],&pixel); break; } case StandardDeviationStatistic: { GetStandardDeviationPixelList(pixel_list[id],&pixel); break; } } SetPixelChannel(statistic_image,channel,pixel,q); } p+=GetPixelChannels(image); q+=GetPixelChannels(statistic_image); } if (SyncCacheViewAuthenticPixels(statistic_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,StatisticImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } statistic_view=DestroyCacheView(statistic_view); image_view=DestroyCacheView(image_view); pixel_list=DestroyPixelListThreadSet(pixel_list); if (status == MagickFalse) statistic_image=DestroyImage(statistic_image); return(statistic_image); }
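/*
  A minimal usage sketch for StatisticImage() (illustrative driver code, not
  part of this file): replace each pixel with the median of its 3x3
  neighborhood.

    Image
      *median_image;

    median_image=StatisticImage(image,MedianStatistic,3,3,exception);
    if (median_image == (Image *) NULL)
      CatchException(exception);
*/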
mkl_util.h
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #ifndef TENSORFLOW_CORE_UTIL_MKL_UTIL_H_ #define TENSORFLOW_CORE_UTIL_MKL_UTIL_H_ #ifdef INTEL_MKL #include <vector> #include <unordered_map> #include <utility> #ifdef INTEL_MKL_ML #include "mkl_dnn.h" #include "mkl_dnn_types.h" #include "mkl_service.h" #include "mkl_trans.h" #endif #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/graph/mkl_graph_util.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/lib/gtl/array_slice.h" #include "tensorflow/core/platform/logging.h" #include "tensorflow/core/platform/macros.h" #include "tensorflow/core/util/padding.h" #include "tensorflow/core/util/tensor_format.h" #ifndef INTEL_MKL_ML #include "mkldnn.hpp" #include "tensorflow/core/lib/core/stringpiece.h" using mkldnn::engine; using mkldnn::memory; using mkldnn::padding_kind; using mkldnn::primitive; using mkldnn::reorder; #endif #ifdef _WIN32 typedef unsigned int uint; #endif namespace tensorflow { // The file contains a number of utility classes and functions used by MKL // enabled kernels // This class encapsulates all the meta data that is associated with an MKL // tensor. A tensor is an MKL tensor if it was created as the result of an // MKL operation, and did not go through a conversion to a standard // Tensorflow tensor. 
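// A sketch of the calling convention used throughout this header: every
// logical input/output of an MKL op travels as a pair of runtime tensors,
// the data tensor plus a serialized shape/layout metadata tensor. The
// helpers below locate the two slots with the index mappers provided by
// the mkl_graph_util.h include, e.g. (illustrative):
//
//   const Tensor& data = ctx->input(GetTensorDataIndex(n, ctx->num_inputs()));
//   const Tensor& meta = ctx->input(GetTensorMetaDataIndex(n, ctx->num_inputs()));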
typedef enum { W = 0, H = 1, C = 2, N = 3 } MklDims; typedef enum { Dim_N = 0, Dim_C = 1, Dim_H = 2, Dim_W = 3, Dim_O = 0, Dim_I = 1 } MklDnnDims; #ifdef INTEL_MKL_ML class MklShape { public: MklShape() {} TF_DISALLOW_COPY_AND_ASSIGN(MklShape); // Cannot copy ~MklShape() { if (sizes_) delete[] sizes_; if (strides_) delete[] strides_; if (mklLayout_) CHECK_EQ(dnnLayoutDelete_F32(mklLayout_), E_SUCCESS); if (tfLayout_) CHECK_EQ(dnnLayoutDelete_F32(tfLayout_), E_SUCCESS); if (tf_to_mkl_dim_map_) delete[] tf_to_mkl_dim_map_; } const bool IsMklTensor() const { return isMklTensor_; } void SetMklTensor(const bool isMklTensor) { isMklTensor_ = isMklTensor; } void SetDimensions(const size_t dimension) { dimension_ = dimension; } void SetMklLayout(dnnLayout_t mklLayout) { mklLayout_ = mklLayout; } void SetMklLayout(const void* primitive, size_t resourceType) { CHECK_EQ( dnnLayoutCreateFromPrimitive_F32(&mklLayout_, (dnnPrimitive_t)primitive, (dnnResourceType_t)resourceType), E_SUCCESS); } void SetTfLayout(const size_t dimension, const size_t* sizes, const size_t* strides) { dimension_ = dimension; if (dimension > 0) { // MKl doesn't support zero dimension tensors sizes_ = new size_t[dimension]; strides_ = new size_t[dimension]; for (int ii = 0; ii < dimension; ii++) { sizes_[ii] = sizes[ii]; strides_[ii] = strides[ii]; } CHECK_EQ(dnnLayoutCreate_F32(&tfLayout_, dimension, sizes, strides), E_SUCCESS); } } // Default case - MKL dim ordering is opposite of TF dim ordering // MKL -> (DIMS-1)...0 where (DIMS-1) is outermost dim and 0 is innermost dim // TF -> 0...(DIMS-1) where 0 is outermost dim and (DIMS-1) is innermost dim // For layers that rely on data_format semantics (conv, pooling etc.) // or operate only on certain dimensions (relu, concat, split etc.), // Mkl APIs might require us to reorder these dimensions. In such cases, // kernels should explicitly set this map void SetTfDimOrder(const size_t dimension) { CHECK(dimension == dimension_); if (tf_to_mkl_dim_map_ == nullptr) { tf_to_mkl_dim_map_ = new size_t[dimension]; } for (size_t ii = 0; ii < dimension; ii++) { tf_to_mkl_dim_map_[ii] = dimension - (ii + 1); } } void SetTfDimOrder(const size_t dimension, const size_t* tf_to_mkl_dim_map) { CHECK(dimension == dimension_); if (tf_to_mkl_dim_map_ == nullptr) { tf_to_mkl_dim_map_ = new size_t[dimension]; } for (size_t ii = 0; ii < dimension; ii++) { tf_to_mkl_dim_map_[ii] = tf_to_mkl_dim_map[ii]; } } void SetTfDimOrder(const size_t dimension, TensorFormat data_format) { CHECK_EQ(dimension, 4); CHECK(dimension == dimension_); if (tf_to_mkl_dim_map_ == nullptr) { tf_to_mkl_dim_map_ = new size_t[dimension]; } tf_to_mkl_dim_map_[GetTensorDimIndex<2>(data_format, 'W')] = MklDims::W; tf_to_mkl_dim_map_[GetTensorDimIndex<2>(data_format, 'H')] = MklDims::H; tf_to_mkl_dim_map_[GetTensorDimIndex<2>(data_format, 'C')] = MklDims::C; tf_to_mkl_dim_map_[GetTensorDimIndex<2>(data_format, 'N')] = MklDims::N; } const dnnLayout_t GetMklLayout() const { return mklLayout_; } const dnnLayout_t GetTfLayout() const { return tfLayout_; } const dnnLayout_t GetCurLayout() const { return isMklTensor_ ? 
mklLayout_ : tfLayout_; } size_t GetDimension() const { return dimension_; } const size_t* GetSizes() const { return sizes_; } int64 dim_size(int index) const { return sizes_[index]; } int64 tf_dim_size(int index) const { return sizes_[tf_to_mkl_dim_map_[index]]; } const size_t* GetStrides() const { return strides_; } const size_t* GetTfToMklDimMap() const { return tf_to_mkl_dim_map_; } size_t tf_dim_idx(int index) const { return tf_to_mkl_dim_map_[index]; } // Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd' // corresponds to MKL's Channel dimension. bool IsMklChannelDim(int d) const { return tf_dim_idx(d) == MklDims::C; } // Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd' // corresponds to MKL's Batch dimension. bool IsMklBatchDim(int d) const { return tf_dim_idx(d) == MklDims::N; } // Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd' // corresponds to MKL's Width dimension. bool IsMklWidthDim(int d) const { return tf_dim_idx(d) == MklDims::W; } // Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd' // corresponds to MKL's Height dimension. bool IsMklHeightDim(int d) const { return tf_dim_idx(d) == MklDims::H; } // Check if the TF-Mkl dimension ordering map specifies if the input // tensor is in NCHW format. bool IsTensorInNCHWFormat() const { TensorFormat data_format = FORMAT_NCHW; return (IsMklBatchDim(GetTensorDimIndex<2>(data_format, 'N')) && IsMklChannelDim(GetTensorDimIndex<2>(data_format, 'C')) && IsMklHeightDim(GetTensorDimIndex<2>(data_format, 'H')) && IsMklWidthDim(GetTensorDimIndex<2>(data_format, 'W'))); } // Check if the TF-Mkl dimension ordering map specifies if the input // tensor is in NHWC format. bool IsTensorInNHWCFormat() const { TensorFormat data_format = FORMAT_NHWC; return (IsMklBatchDim(GetTensorDimIndex<2>(data_format, 'N')) && IsMklChannelDim(GetTensorDimIndex<2>(data_format, 'C')) && IsMklHeightDim(GetTensorDimIndex<2>(data_format, 'H')) && IsMklWidthDim(GetTensorDimIndex<2>(data_format, 'W'))); } void GetConvertedFlatData(dnnLayout_t targetLayout, void* input, void* output) const { dnnLayout_t curLayout; if (isMklTensor_) curLayout = mklLayout_; else curLayout = tfLayout_; dnnPrimitive_t convert; CHECK_EQ(dnnConversionCreate_F32(&convert, curLayout, targetLayout), E_SUCCESS); CHECK_EQ(dnnConversionExecute_F32(convert, input, output), E_SUCCESS); CHECK_EQ(dnnDelete_F32(convert), E_SUCCESS); } // The following methods are used for serializing and de-serializing the // contents of the mklshape object. // The data is serialized in this order // isMklTensor_ // dimension_ // sizes_ // strides_ // mklLayout_ // tfLayout_ // tf_to_mkl_dim_map_ #define SIZE_OF_MKL_DNN_BUF \ (dnnLayoutSerializationBufferSize_F32()) // Size of buffer needed to // serialize dnn_layout pointer // Size of buffer to hold the serialized object, the size is computed as // follows sizeof(isMklTensor_) + sizeof(dimension_) + sizeof(sizes_) + // sizeof(strides_) // + sizeof(mklLayout_ buffer) + sizeof(tfLayout_ buffer) // + sizeof(tf_to_mkl_dim_map_) #define SIZE_OF_MKL_SERIAL_DATA(dims) \ (2 * sizeof(size_t) + 3 * dims * sizeof(size_t) + 2 * SIZE_OF_MKL_DNN_BUF) // First we need to define some macro for offsets into the serial buffer where // different elements of Mklshape is written/read from #define IS_MKL_TENSOR_OFFSET 0 // Location from start of buffer where isMklTensor_ is serialized #define DIMS_OFFSET \ (IS_MKL_TENSOR_OFFSET + sizeof(size_t)) // Location of dimension_ // Location of sizes. 
Note dim is not used here, left here // to make macros consistent. #define SIZES_OFFSET(dims) (DIMS_OFFSET + sizeof(size_t)) #define STRIDES_OFFSET(dims) \ (SIZES_OFFSET(dims) + dims * sizeof(size_t)) // Location of strides #define MKL_LAYOUT_OFFSET(dims) \ (STRIDES_OFFSET(dims) + dims * sizeof(size_t)) // Location of mklLayout_ #define TF_LAYOUT_OFFSET(dims) \ (MKL_LAYOUT_OFFSET(dims) + SIZE_OF_MKL_DNN_BUF) // Location of tfLayout_ // Location of tf_to_mkl_dim_map_ #define TF_TO_MKL_DIM_MAP_OFFSET(dims) \ (TF_LAYOUT_OFFSET(dims) + SIZE_OF_MKL_DNN_BUF) // TODO(agramesh1) make sure to create a const to share with rewrite pass // for min size of MKL metadata tensor. void DeSerializeMklShape(const unsigned char* buf, size_t buf_size) { CHECK(buf_size >= sizeof(size_t)) << "Bufsize too small in DeSerialize"; // Make sure buffer holds at least isMklTensor_ isMklTensor_ = *reinterpret_cast<const size_t*>(buf + IS_MKL_TENSOR_OFFSET) != 0; if (isMklTensor_) { // If it is an MKL Tensor then read the rest dimension_ = *(reinterpret_cast<const size_t*>(buf + DIMS_OFFSET)); CHECK(buf_size >= SIZE_OF_MKL_SERIAL_DATA(dimension_)) << "Bufsize too small in DeSerialize"; sizes_ = new size_t[dimension_]; strides_ = new size_t[dimension_]; tf_to_mkl_dim_map_ = new size_t[dimension_]; for (int i = 0; i < dimension_; i++) { sizes_[i] = reinterpret_cast<const size_t*>(buf + SIZES_OFFSET(dimension_))[i]; strides_[i] = reinterpret_cast<const size_t*>( buf + STRIDES_OFFSET(dimension_))[i]; tf_to_mkl_dim_map_[i] = reinterpret_cast<const size_t*>( buf + TF_TO_MKL_DIM_MAP_OFFSET(dimension_))[i]; } CHECK_EQ(dnnLayoutDeserialize_F32(&mklLayout_, buf + MKL_LAYOUT_OFFSET(dimension_)), E_SUCCESS); CHECK_EQ(dnnLayoutDeserialize_F32(&tfLayout_, buf + TF_LAYOUT_OFFSET(dimension_)), E_SUCCESS); } } void SerializeMklShape(unsigned char* buf, size_t buf_size) const { CHECK(buf_size >= SIZE_OF_MKL_SERIAL_DATA(dimension_)) << "Bufsize too small to Serialize"; *reinterpret_cast<size_t*>(buf + IS_MKL_TENSOR_OFFSET) = isMklTensor_ ? 
1 : 0; if (isMklTensor_) { *(reinterpret_cast<size_t*>(buf + DIMS_OFFSET)) = dimension_; for (int i = 0; i < dimension_; i++) { reinterpret_cast<size_t*>(buf + SIZES_OFFSET(dimension_))[i] = sizes_[i]; reinterpret_cast<size_t*>(buf + STRIDES_OFFSET(dimension_))[i] = strides_[i]; reinterpret_cast<size_t*>(buf + TF_TO_MKL_DIM_MAP_OFFSET(dimension_))[i] = tf_to_mkl_dim_map_[i]; } CHECK_EQ(dnnLayoutSerialize_F32(mklLayout_, buf + MKL_LAYOUT_OFFSET(dimension_)), E_SUCCESS); CHECK_EQ( dnnLayoutSerialize_F32(tfLayout_, buf + TF_LAYOUT_OFFSET(dimension_)), E_SUCCESS); } } private: bool isMklTensor_ = false; // Flag to indicate if the tensor is an MKL tensor or not dnnLayout_t mklLayout_ = nullptr; // Pointer to the MKL layout dnnLayout_t tfLayout_ = nullptr; // Pointer to layout of corresponding // Tensorflow tensor, used when conversion from MKL to standard tensor size_t dimension_ = 0; size_t* sizes_ = nullptr; // Required by MKL for conversions size_t* strides_ = nullptr; // Required by MKL for conversions size_t* tf_to_mkl_dim_map_ = nullptr; // TF dimension corresponding to this MKL dimension }; #else // Forward decl TensorFormat MklDnnDataFormatToTFDataFormat(memory::format format); memory::dims CalculateTFStrides(const memory::dims& dims_tf_order); memory::desc CreateBlockedMemDescHelper(const memory::dims& dim, const memory::dims& strides, memory::data_type dtype); class MklDnnShape { private: typedef struct { /// Flag to indicate if the tensor is an MKL tensor or not bool is_mkl_tensor_ = false; /// Number of dimensions in Tensorflow format size_t dimension_ = 0; /// Required by MKLDNN for conversions mkldnn_dims_t sizes_; // Required by MKL for conversions memory::format tf_data_format_ = memory::format::format_undef; memory::data_type T_ = memory::data_type::data_undef; // MKL layout mkldnn_memory_desc_t mkl_md_; /// TF dimension corresponding to this MKL dimension mkldnn_dims_t map_; } MklShapeData; MklShapeData data_; typedef std::remove_extent<mkldnn_dims_t>::type mkldnn_dim_t; #define INVALID_DIM_SIZE -1 public: MklDnnShape() { for (size_t i = 0; i < sizeof(data_.sizes_) / sizeof(data_.sizes_[0]); ++i) { data_.sizes_[i] = -1; } for (size_t i = 0; i < sizeof(data_.map_) / sizeof(data_.map_[0]); ++i) { data_.map_[i] = -1; } } ~MklDnnShape() {} TF_DISALLOW_COPY_AND_ASSIGN(MklDnnShape); // Cannot copy /// Helper function to compare memory::desc objects for MklDnn. /// May be this should go into MklDnn directly. inline bool CompareMklDnnLayouts(const memory::desc& md1, const memory::desc& md2) const { mkldnn_memory_desc_t mdd1 = md1.data; mkldnn_memory_desc_t mdd2 = md2.data; const char* d1 = reinterpret_cast<const char*>(&mdd1); const char* d2 = reinterpret_cast<const char*>(&mdd2); size_t md_size = sizeof(mdd1); for (size_t i = 0; i < md_size; i++) { if (*d1++ != *d2++) { return false; } } return true; } /// Equality function for MklDnnShape objects /// @return true if both are equal; false otherwise. inline bool operator==(const MklDnnShape& input_shape) const { if (this->IsMklTensor() != input_shape.IsMklTensor()) { return false; } // If input tensors are in Mkl layout, then we check for dimensions and // sizes. if (this->IsMklTensor()) { return this->GetTfShape() == input_shape.GetTfShape() && CompareMklDnnLayouts(this->GetMklLayout(), input_shape.GetMklLayout()); } return true; } /// Equality operator for MklDnnShape and TFShape. 
/// Returns: true if TF shapes for both are the same, false otherwise inline bool operator==(const TensorShape& input_shape) const { if (!this->IsMklTensor()) { return false; } return this->GetTfShape() == input_shape; } inline const bool IsMklTensor() const { return data_.is_mkl_tensor_; } inline void SetMklTensor(bool is_mkl_tensor) { data_.is_mkl_tensor_ = is_mkl_tensor; } inline void SetDimensions(const size_t dimension) { data_.dimension_ = dimension; } inline size_t GetDimension(char dimension) const { int index = GetMklDnnTensorDimIndex(dimension); CHECK(index >= 0 && index < this->GetDimension()) << "Invalid index from the dimension: " << index << ", " << dimension; return this->DimSize(index); } inline int32 GetMklDnnTensorDimIndex(char dimension) const { switch (dimension) { case 'N': return MklDnnDims::Dim_N; case 'C': return MklDnnDims::Dim_C; case 'H': return MklDnnDims::Dim_H; case 'W': return MklDnnDims::Dim_W; default: LOG(FATAL) << "Invalid dimension: " << dimension; return -1; // Avoid compiler warning about missing return value } } inline size_t GetDimension() const { return data_.dimension_; } inline const int* GetSizes() const { return reinterpret_cast<const int*>(&data_.sizes_[0]); } // Returns an mkldnn::memory::dims object that contains the sizes of this // MklDnnShape object. inline memory::dims GetSizesAsMklDnnDims() const { memory::dims retVal; if (data_.is_mkl_tensor_) { size_t dimensions = sizeof(data_.sizes_) / sizeof(data_.sizes_[0]); for (size_t i = 0; i < dimensions; i++) { if (data_.sizes_[i] != INVALID_DIM_SIZE) retVal.push_back(data_.sizes_[i]); } } else { CHECK_EQ(data_.is_mkl_tensor_, true); } return retVal; } inline int64 DimSize(int index) const { CHECK_LT(index, sizeof(data_.sizes_) / sizeof(data_.sizes_[0])); return data_.sizes_[index]; } /// Return TensorShape that describes the Tensorflow shape of the tensor /// represented by this MklShape. inline TensorShape GetTfShape() const { CHECK_EQ(data_.is_mkl_tensor_, true); std::vector<int32> shape(data_.dimension_, -1); if (data_.tf_data_format_ != memory::format::blocked) { for (size_t idx = 0; idx < data_.dimension_; ++idx) { shape[idx] = data_.sizes_[TfDimIdx(idx)]; } } else { // If Tensorflow shape is in Blocked format, then we don't have dimension // map for it. So we just create Tensorflow shape from sizes in the // specified order. for (size_t idx = 0; idx < data_.dimension_; ++idx) { shape[idx] = data_.sizes_[idx]; } } TensorShape ts; bool ret = TensorShapeUtils::MakeShape(shape, &ts).ok(); CHECK_EQ(ret, true); return ts; } inline void SetElemType(memory::data_type dt) { data_.T_ = dt; } inline const memory::data_type GetElemType() { return data_.T_; } inline void SetMklLayout(memory::primitive_desc* pd) { CHECK_NOTNULL(pd); data_.mkl_md_ = pd->desc().data; } inline void SetMklLayout(memory::desc* md) { CHECK_NOTNULL(md); data_.mkl_md_ = md->data; } inline const memory::desc GetMklLayout() const { return memory::desc(data_.mkl_md_); } inline memory::format GetTfDataFormat() const { return data_.tf_data_format_; } /// We don't create primitive_descriptor for TensorFlow layout now. /// We use lazy evaluation and create it only when needed. Input format can /// also be Blocked format. 
inline void SetTfLayout(size_t dims, const memory::dims& sizes, memory::format format) { CHECK_EQ(dims, sizes.size()); data_.dimension_ = dims; for (size_t ii = 0; ii < dims; ii++) { data_.sizes_[ii] = sizes[ii]; } data_.tf_data_format_ = format; if (format != memory::format::blocked) { SetTfDimOrder(dims, format); } } inline const memory::desc GetTfLayout() const { memory::dims dims; for (size_t ii = 0; ii < data_.dimension_; ii++) { dims.push_back(data_.sizes_[ii]); } // Create Blocked memory desc if input TF format was set like that. if (data_.tf_data_format_ == memory::format::blocked) { auto strides = CalculateTFStrides(dims); return CreateBlockedMemDescHelper(dims, strides, data_.T_); } else { return memory::desc(dims, data_.T_, data_.tf_data_format_); } } inline const memory::desc GetCurLayout() const { return IsMklTensor() ? GetMklLayout() : GetTfLayout(); } // nhasabni - I've removed SetTfDimOrder that was setting default order in // case of MKL-ML. We don't need a case of default dimension order because // when an operator that does not get data_format attribute gets all inputs // in Tensorflow format, it will produce output in Tensorflow format. inline void SetTfDimOrder(const size_t dimension, const mkldnn_dims_t map) { CHECK(dimension == data_.dimension_); for (size_t ii = 0; ii < dimension; ii++) { data_.map_[ii] = map[ii]; } } inline void SetTfDimOrder(const size_t dimension, TensorFormat data_format) { // TODO(nhasabni): Why do we restrict this to 4D? CHECK_EQ(dimension, 4); CHECK(dimension == data_.dimension_); data_.map_[GetTensorDimIndex<2>(data_format, 'W')] = MklDnnDims::Dim_W; data_.map_[GetTensorDimIndex<2>(data_format, 'H')] = MklDnnDims::Dim_H; data_.map_[GetTensorDimIndex<2>(data_format, 'C')] = MklDnnDims::Dim_C; data_.map_[GetTensorDimIndex<2>(data_format, 'N')] = MklDnnDims::Dim_N; } inline void SetTfDimOrder(const size_t dimension, memory::format format) { TensorFormat data_format = MklDnnDataFormatToTFDataFormat(format); SetTfDimOrder(dimension, data_format); } inline const mkldnn_dim_t* GetTfToMklDimMap() const { return &data_.map_[0]; } inline size_t TfDimIdx(int index) const { return data_.map_[index]; } inline int64 TfDimSize(int index) const { return data_.sizes_[TfDimIdx(index)]; } /// Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd' /// corresponds to MKL's Channel dimension. inline bool IsMklChannelDim(int d) const { return TfDimIdx(d) == MklDnnDims::Dim_C; } /// Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd' /// corresponds to MKL's Batch dimension. inline bool IsMklBatchDim(int d) const { return TfDimIdx(d) == MklDnnDims::Dim_N; } /// Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd' /// corresponds to MKL's Width dimension. inline bool IsMklWidthDim(int d) const { return TfDimIdx(d) == MklDnnDims::Dim_W; } /// Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd' /// corresponds to MKL's Height dimension. inline bool IsMklHeightDim(int d) const { return TfDimIdx(d) == MklDnnDims::Dim_H; } /// Check if the TF-Mkl dimension ordering map specifies if the input /// tensor is in NCHW format. 
inline bool IsTensorInNCHWFormat() const { TensorFormat data_format = FORMAT_NCHW; return (IsMklBatchDim(GetTensorDimIndex<2>(data_format, 'N')) && IsMklChannelDim(GetTensorDimIndex<2>(data_format, 'C')) && IsMklHeightDim(GetTensorDimIndex<2>(data_format, 'H')) && IsMklWidthDim(GetTensorDimIndex<2>(data_format, 'W'))); } /// Check if the TF-Mkl dimension ordering map specifies if the input /// tensor is in NHWC format. inline bool IsTensorInNHWCFormat() const { TensorFormat data_format = FORMAT_NHWC; return (IsMklBatchDim(GetTensorDimIndex<2>(data_format, 'N')) && IsMklChannelDim(GetTensorDimIndex<2>(data_format, 'C')) && IsMklHeightDim(GetTensorDimIndex<2>(data_format, 'H')) && IsMklWidthDim(GetTensorDimIndex<2>(data_format, 'W'))); } /// The following methods are used for serializing and de-serializing the /// contents of the mklshape object. /// The data is serialized in this order /// is_mkl_tensor_ : dimension_ : sizes_ : map_: format_ : T_ : mkl_pd_; /// Size of buffer to hold the serialized object, the size is computed by /// following above mentioned order inline size_t GetSerializeBufferSize() const { return sizeof(MklShapeData); } void SerializeMklDnnShape(unsigned char* buf, size_t buf_size) const { CHECK(buf_size >= GetSerializeBufferSize()) << "Buffer size is too small to SerializeMklDnnShape"; *reinterpret_cast<MklShapeData*>(buf) = data_; } void DeSerializeMklDnnShape(const unsigned char* buf, size_t buf_size) { // Make sure buffer holds at least is_mkl_tensor_. CHECK(buf_size >= sizeof(data_.is_mkl_tensor_)) << "Buffer size is too small in DeSerializeMklDnnShape"; const bool is_mkl_tensor = *reinterpret_cast<const bool*>(buf); if (is_mkl_tensor) { // If it is an MKL Tensor then read the rest CHECK(buf_size >= GetSerializeBufferSize()) << "Buffer size is too small in DeSerializeMklDnnShape"; data_ = *reinterpret_cast<const MklShapeData*>(buf); } } }; #endif // List of MklShape objects. Used in Concat/Split layers. #ifndef INTEL_MKL_ML typedef std::vector<MklDnnShape> MklDnnShapeList; #else typedef std::vector<MklShape> MklShapeList; #endif #ifdef INTEL_MKL_ML // Check if all tensors specified by MklShapes are MKL tensors. inline bool AreAllMklTensors(const MklShapeList& shapes) { for (auto& s : shapes) { if (!s.IsMklTensor()) { return false; } } return true; } template <typename T> inline Tensor ConvertMklToTF(OpKernelContext* context, const Tensor& mkl_tensor, const MklShape& mkl_shape) { Tensor output_tensor; TensorShape output_shape; for (size_t j = 0; j < mkl_shape.GetDimension(); j++) { // Outermost to innermost dimension output_shape.AddDim(mkl_shape.GetSizes()[mkl_shape.tf_dim_idx(j)]); } // Allocate output tensor. context->allocate_temp(DataTypeToEnum<T>::v(), output_shape, &output_tensor); dnnLayout_t output_layout = static_cast<dnnLayout_t>(mkl_shape.GetTfLayout()); void* input_buffer = const_cast<T*>(mkl_tensor.flat<T>().data()); void* output_buffer = const_cast<T*>(output_tensor.flat<T>().data()); if (mkl_tensor.NumElements() != 0) { mkl_shape.GetConvertedFlatData(output_layout, input_buffer, output_buffer); } return output_tensor; } #else using mkldnn::stream; template <typename T> class MklDnnData; template <typename T> inline Tensor ConvertMklToTF(OpKernelContext* context, const Tensor& mkl_tensor, const MklDnnShape& mkl_shape) { Tensor output_tensor; try { if (!mkl_shape.IsMklTensor()) return mkl_tensor; // return input since it is already TF tensor TensorShape output_shape = mkl_shape.GetTfShape();; // Allocate output tensor. 
context->allocate_temp(DataTypeToEnum<T>::v(), output_shape, &output_tensor); auto cpu_engine = engine(engine::cpu, 0); MklDnnData<T> input(&cpu_engine); // Get Mkl layout of input tensor. auto input_mkl_md = mkl_shape.GetMklLayout(); auto output_tf_md = mkl_shape.GetTfLayout(); auto output_tf_pd = memory::primitive_desc(output_tf_md, cpu_engine); input.SetUsrMem(input_mkl_md, &mkl_tensor); // reorder if (input.IsReorderNeeded(output_tf_pd)) { std::vector<primitive> net; CHECK_EQ(input.CheckReorderToOpMem(output_tf_pd, &output_tensor, &net), true); stream(stream::kind::eager).submit(net).wait(); } else { // If not, just forward input tensor to output tensor. CHECK(output_tensor.CopyFrom(mkl_tensor, output_shape)); } } catch (mkldnn::error& e) { string error_msg = "Status: " + std::to_string(e.status) + ", message: " + string(e.message) + ", in file " + string(__FILE__) + ":" + std::to_string(__LINE__); LOG(FATAL) << "Operation received an exception: " << error_msg; } return output_tensor; } #endif // Get the MKL shape from the second string tensor #ifdef INTEL_MKL_ML inline void GetMklShape(OpKernelContext* ctext, int n, MklShape* mklshape) { mklshape->DeSerializeMklShape( ctext->input(GetTensorMetaDataIndex(n, ctext->num_inputs())) .flat<uint8>() .data(), ctext->input(GetTensorMetaDataIndex(n, ctext->num_inputs())) .flat<uint8>() .size() * sizeof(uint8)); } #else inline void GetMklShape(OpKernelContext* ctext, int n, MklDnnShape* mklshape) { mklshape->DeSerializeMklDnnShape( ctext->input(GetTensorMetaDataIndex(n, ctext->num_inputs())) .flat<uint8>() .data(), ctext->input(GetTensorMetaDataIndex(n, ctext->num_inputs())) .flat<uint8>() .size() * sizeof(uint8)); } #endif // Gets the actual input inline const Tensor& MklGetInput(OpKernelContext* ctext, int n) { return ctext->input(GetTensorDataIndex(n, ctext->num_inputs())); } inline void GetMklInputList(OpKernelContext* ctext, StringPiece name, OpInputList* input_tensors) { CHECK_NOTNULL(input_tensors); ctext->input_list(name, input_tensors); } #ifdef INTEL_MKL_ML inline void GetMklShapeList(OpKernelContext* ctext, StringPiece name, MklShapeList* mkl_shapes) { OpInputList input_mkl_tensors; GetMklInputList(ctext, strings::StrCat("mkl_", name), &input_mkl_tensors); for (int i = 0; i < input_mkl_tensors.size(); i++) { (*mkl_shapes)[i].DeSerializeMklShape( input_mkl_tensors[i].flat<uint8>().data(), input_mkl_tensors[i].flat<uint8>().size() * sizeof(uint8)); } } #else inline void GetMklShapeList(OpKernelContext* ctext, StringPiece name, MklDnnShapeList* mkl_shapes) { OpInputList input_mkl_tensors; GetMklInputList(ctext, strings::StrCat("mkl_", name), &input_mkl_tensors); for (int i = 0; i < input_mkl_tensors.size(); i++) { (*mkl_shapes)[i].DeSerializeMklDnnShape( input_mkl_tensors[i].flat<uint8>().data(), input_mkl_tensors[i].flat<uint8>().size() * sizeof(uint8)); } } #endif #ifndef INTEL_MKL_ML /// Get shape of input tensor pointed by 'input_idx' in TensorShape format. /// If the input tensor is in MKL layout, then obtains TensorShape from /// MklShape. inline TensorShape GetTfShape(OpKernelContext* context, size_t input_idx) { // Sanity check. 
CHECK_NOTNULL(context); CHECK_LT(input_idx, context->num_inputs()); MklDnnShape input_mkl_shape; GetMklShape(context, input_idx, &input_mkl_shape); if (input_mkl_shape.IsMklTensor()) { return input_mkl_shape.GetTfShape(); } else { const Tensor& t = MklGetInput(context, input_idx); return t.shape(); } } #endif #ifdef INTEL_MKL_ML // Allocate the second output tensor that will contain // the MKL shape serialized inline void AllocateOutputSetMklShape(OpKernelContext* ctext, int n, const MklShape& mkl_shape) { Tensor* second_tensor = nullptr; TensorShape second_shape; second_shape.AddDim(SIZE_OF_MKL_SERIAL_DATA(mkl_shape.GetDimension())); OP_REQUIRES_OK(ctext, ctext->allocate_output( GetTensorMetaDataIndex(n, ctext->num_outputs()), second_shape, &second_tensor)); mkl_shape.SerializeMklShape( second_tensor->flat<uint8>().data(), second_tensor->flat<uint8>().size() * sizeof(uint8)); } #else // Allocate the second output tensor that will contain // the MKL shape serialized inline void AllocateOutputSetMklShape(OpKernelContext* ctext, int n, const MklDnnShape& mkl_shape) { Tensor* second_tensor = nullptr; TensorShape second_shape; second_shape.AddDim(mkl_shape.GetSerializeBufferSize()); OP_REQUIRES_OK(ctext, ctext->allocate_output( GetTensorMetaDataIndex(n, ctext->num_outputs()), second_shape, &second_tensor)); mkl_shape.SerializeMklDnnShape( second_tensor->flat<uint8>().data(), second_tensor->flat<uint8>().size() * sizeof(uint8)); } #endif #ifdef INTEL_MKL_ML // Allocate the output tensor, create a second output tensor that will contain // the MKL shape serialized inline void AllocateOutputSetMklShape(OpKernelContext* ctext, int n, Tensor** output, const TensorShape& tf_shape, const MklShape& mkl_shape) { Tensor* second_tensor = nullptr; TensorShape second_shape; second_shape.AddDim(SIZE_OF_MKL_SERIAL_DATA(mkl_shape.GetDimension())); OP_REQUIRES_OK( ctext, ctext->allocate_output(GetTensorDataIndex(n, ctext->num_outputs()), tf_shape, output)); OP_REQUIRES_OK(ctext, ctext->allocate_output( GetTensorMetaDataIndex(n, ctext->num_outputs()), second_shape, &second_tensor)); mkl_shape.SerializeMklShape( second_tensor->flat<uint8>().data(), second_tensor->flat<uint8>().size() * sizeof(uint8)); } #else // Allocate the output tensor, create a second output tensor that will contain // the MKL shape serialized inline void AllocateOutputSetMklShape(OpKernelContext* ctext, int n, Tensor** output, const TensorShape& tf_shape, const MklDnnShape& mkl_shape) { Tensor* second_tensor = nullptr; TensorShape second_shape; second_shape.AddDim(mkl_shape.GetSerializeBufferSize()); OP_REQUIRES_OK( ctext, ctext->allocate_output(GetTensorDataIndex(n, ctext->num_outputs()), tf_shape, output)); OP_REQUIRES_OK(ctext, ctext->allocate_output( GetTensorMetaDataIndex(n, ctext->num_outputs()), second_shape, &second_tensor)); mkl_shape.SerializeMklDnnShape( second_tensor->flat<uint8>().data(), second_tensor->flat<uint8>().size() * sizeof(uint8)); } #endif // Allocates a temp tensor and returns the data buffer for temporary storage. 
// Currently #ifndef INTEL_MKL_ML template <typename T> inline void AllocTmpBuffer(OpKernelContext* context, Tensor* tensor_out, const memory::primitive_desc& pd, void** buf_out) { TensorShape tf_shape; tf_shape.AddDim(pd.get_size() / sizeof(T) + 1); OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<T>::v(), tf_shape, tensor_out)); *buf_out = static_cast<void*>(tensor_out->flat<T>().data()); } #else inline void AllocTmpBuffer(OpKernelContext* context, Tensor* tensor_out, dnnLayout_t lt_buff, void** buf_out) { TensorShape tf_shape; tf_shape.AddDim( dnnLayoutGetMemorySize_F32(static_cast<dnnLayout_t>(lt_buff)) / sizeof(float) + 1); OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<float>::v(), tf_shape, tensor_out)); *buf_out = static_cast<void*>(tensor_out->flat<float>().data()); } #endif template <typename T> inline void AllocTmpBuffer(OpKernelContext* context, Tensor* tensor_out, TensorShape tf_shape) { OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<T>::v(), tf_shape, tensor_out)); } inline void GetStridesFromSizes(TensorFormat data_format, size_t* strides, const size_t* sizes) { // MKL requires strides in NCHW if (data_format == FORMAT_NHWC) { strides[0] = sizes[2]; strides[1] = sizes[0] * sizes[2]; strides[2] = 1; strides[3] = sizes[0] * sizes[1] * sizes[2]; } else { strides[0] = 1; strides[1] = sizes[0]; strides[2] = sizes[0] * sizes[1]; strides[3] = sizes[0] * sizes[1] * sizes[2]; } } #ifdef INTEL_MKL_ML inline void MklSizesToTFSizes(OpKernelContext* context, TensorFormat data_format_, const MklShape& mkl_shape, TensorShape* tf_shape) { size_t tf_dim = mkl_shape.GetDimension(); const size_t* tf_sizes = mkl_shape.GetSizes(); OP_REQUIRES(context, tf_dim == 4, errors::InvalidArgument("MKLSizesToTFSizes: size must be 4-dim")); std::vector<int32> sizes; sizes.push_back(tf_sizes[3]); if (data_format_ == FORMAT_NHWC) { sizes.push_back(tf_sizes[1]); sizes.push_back(tf_sizes[0]); sizes.push_back(tf_sizes[2]); } else { sizes.push_back(tf_sizes[2]); sizes.push_back(tf_sizes[1]); sizes.push_back(tf_sizes[0]); } OP_REQUIRES_OK(context, TensorShapeUtils::MakeShape(sizes, tf_shape)); } #endif inline int32 GetMklTensorDimIndex(char dimension) { switch (dimension) { case 'N': return MklDims::N; case 'C': return MklDims::C; case 'H': return MklDims::H; case 'W': return MklDims::W; default: LOG(FATAL) << "Invalid dimension: " << dimension; return -1; // Avoid compiler warning about missing return value } } #ifdef INTEL_MKL_ML inline int64 GetMklTensorDim(const MklShape& mkl_shape, char dimension) { int index = GetMklTensorDimIndex(dimension); CHECK(index >= 0 && index < mkl_shape.GetDimension()) << "Invalid index from the dimension: " << index << ", " << dimension; return mkl_shape.dim_size(index); } #endif inline void CopyMklTensorInToOut(OpKernelContext* context, int idx_in, int idx_out) { int num_inputs = context->num_inputs(); int num_outputs = context->num_outputs(); int idx_data_in = GetTensorDataIndex(idx_in, num_inputs); int idx_meta_in = GetTensorMetaDataIndex(idx_in, num_inputs); int idx_data_out = GetTensorDataIndex(idx_out, num_outputs); int idx_meta_out = GetTensorMetaDataIndex(idx_out, num_outputs); const Tensor& data = context->input(idx_data_in); const Tensor& meta = context->input(idx_meta_in); Tensor output(data.dtype()); Tensor meta_output(meta.dtype()); // TODO(intel_tf): alternatively, call forward_input_to_output_with_shape(...) 
CHECK(output.CopyFrom(data, data.shape())); CHECK(meta_output.CopyFrom(meta, meta.shape())); context->set_output(idx_data_out, output); context->set_output(idx_meta_out, meta_output); } #ifdef INTEL_MKL_ML inline void CopyTfTensorInToOutWithShape(OpKernelContext* context, int idx_in, int idx_out, const TensorShape& shape) { int num_inputs = context->num_inputs(); int num_outputs = context->num_outputs(); int idx_data_in = GetTensorDataIndex(idx_in, num_inputs); int idx_data_out = GetTensorDataIndex(idx_out, num_outputs); const Tensor& data = context->input(idx_data_in); MklShape mkl_shape_output; mkl_shape_output.SetMklTensor(false); AllocateOutputSetMklShape(context, idx_out, mkl_shape_output); Tensor output(data.dtype()); // TODO(intel_tf): alternatively, call forward_input_to_output_with_shape(...) CHECK(output.CopyFrom(data, shape)); context->set_output(idx_data_out, output); } #else inline void CopyTfTensorInToOutWithShape(OpKernelContext* context, int idx_in, int idx_out, const TensorShape& shape) { int num_inputs = context->num_inputs(); int num_outputs = context->num_outputs(); int idx_data_in = GetTensorDataIndex(idx_in, num_inputs); int idx_data_out = GetTensorDataIndex(idx_out, num_outputs); const Tensor& data = context->input(idx_data_in); MklDnnShape mkl_shape_output; mkl_shape_output.SetMklTensor(false); AllocateOutputSetMklShape(context, idx_out, mkl_shape_output); Tensor output(data.dtype()); // TODO(intel_tf): alternatively, call forward_input_to_output_with_shape(...) CHECK(output.CopyFrom(data, shape)); context->set_output(idx_data_out, output); } #endif #ifdef INTEL_MKL_ML inline void ForwardTfTensorInToOut(OpKernelContext* context, int idx_in, int idx_out) { int num_inputs = context->num_inputs(); int num_outputs = context->num_outputs(); int idx_data_in = GetTensorDataIndex(idx_in, num_inputs); int idx_data_out = GetTensorDataIndex(idx_out, num_outputs); MklShape mkl_shape_output; mkl_shape_output.SetMklTensor(false); AllocateOutputSetMklShape(context, idx_out, mkl_shape_output); if (IsRefType(context->input_dtype(idx_data_in))) { context->forward_ref_input_to_ref_output(idx_data_in, idx_data_out); } else { context->set_output(idx_data_out, context->input(idx_data_in)); } } #else inline void ForwardTfTensorInToOut(OpKernelContext* context, int idx_in, int idx_out) { int num_inputs = context->num_inputs(); int num_outputs = context->num_outputs(); int idx_data_in = GetTensorDataIndex(idx_in, num_inputs); int idx_data_out = GetTensorDataIndex(idx_out, num_outputs); MklDnnShape dnn_shape_output; dnn_shape_output.SetMklTensor(false); AllocateOutputSetMklShape(context, idx_out, dnn_shape_output); if (IsRefType(context->input_dtype(idx_data_in))) { context->forward_ref_input_to_ref_output(idx_data_in, idx_data_out); } else { context->set_output(idx_data_out, context->input(idx_data_in)); } } #endif inline void ForwardMklTensorInToOut(OpKernelContext* context, int idx_in, int idx_out) { int num_inputs = context->num_inputs(); int num_outputs = context->num_outputs(); int idx_data_in = GetTensorDataIndex(idx_in, num_inputs); int idx_meta_in = GetTensorMetaDataIndex(idx_in, num_inputs); int idx_data_out = GetTensorDataIndex(idx_out, num_outputs); int idx_meta_out = GetTensorMetaDataIndex(idx_out, num_outputs); if (IsRefType(context->input_dtype(idx_data_in))) { context->forward_ref_input_to_ref_output(idx_data_in, idx_data_out); context->forward_ref_input_to_ref_output(idx_meta_in, idx_meta_out); } else { context->set_output(idx_data_out, context->input(idx_data_in)); 
context->set_output(idx_meta_out, context->input(idx_meta_in)); } } #ifndef INTEL_MKL_ML // Set a dummy MKLDNN shape (called when the output is in TF format) inline void SetDummyMklDnnShapeOutput(OpKernelContext* context, uint32 idx_data_out) { MklDnnShape mkl_shape_output; mkl_shape_output.SetMklTensor(false); AllocateOutputSetMklShape(context, idx_data_out, mkl_shape_output); } inline void ForwardMklTensorInToOutWithMklShape(OpKernelContext* context, int idx_in, int idx_out, const MklDnnShape& mkl_shape) { int num_inputs = context->num_inputs(); int num_outputs = context->num_outputs(); int idx_data_in = GetTensorDataIndex(idx_in, num_inputs); int idx_data_out = GetTensorDataIndex(idx_out, num_outputs); AllocateOutputSetMklShape(context, idx_out, mkl_shape); if (IsRefType(context->input_dtype(idx_data_in))) { context->forward_ref_input_to_ref_output(idx_data_in, idx_data_out); } else { context->set_output(idx_data_out, context->input(idx_data_in)); } } #endif // Forward the MKL shape ONLY (used in elementwise and other ops where // we call the eigen implementation and MKL shape is not used) inline void ForwardMklMetaDataInToOut(OpKernelContext* context, uint32 idx_data_in, uint32_t idx_data_out) { uint32 idx_meta_in = GetTensorMetaDataIndex(idx_data_in, context->num_inputs()); uint32 idx_meta_out = GetTensorMetaDataIndex(idx_data_out, context->num_outputs()); if (IsRefType(context->input_dtype(idx_data_in))) { context->forward_ref_input_to_ref_output(idx_meta_in, idx_meta_out); } else { context->set_output(idx_meta_out, context->input(idx_meta_in)); } } #ifdef INTEL_MKL_ML // Set a dummy MKL shape (called when the output is in TF format) inline void SetDummyMklShapeOutput(OpKernelContext* context, uint32 idx_data_out) { MklShape mkl_shape_output; mkl_shape_output.SetMklTensor(false); AllocateOutputSetMklShape(context, idx_data_out, mkl_shape_output); } // We don't need these functions in MKLDNN. We have defined equality operator // on MklDnnShape class directly. 
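// For the MKL-DNN path, a comparison sketch (added for illustration; not part
// of the original header), assuming the MklDnnShape equality operator
// mentioned above and the GetTfShape() helper defined earlier in this file:
//
//   MklDnnShape shape_a, shape_b;
//   GetMklShape(context, 0, &shape_a);
//   GetMklShape(context, 1, &shape_b);
//   bool same_mkl = (shape_a == shape_b);  // MklDnnShape's equality operator
//   bool same_tf = (GetTfShape(context, 0) == GetTfShape(context, 1));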
// Checks if the TF shape for both MKL tensors is the same or not // Returns: true if both TF shapes are the same, false otherwise inline bool MklCompareShapes(const MklShape* input_shape_0, const MklShape* input_shape_1) { // Check for number of dimensions if (input_shape_0->GetDimension() != input_shape_1->GetDimension()) { return false; } // Check size of each dimension size_t ndims = input_shape_0->GetDimension(); for (size_t i = 0; i < ndims; i++) { if (input_shape_0->dim_size(i) != input_shape_1->dim_size(i)) { return false; } } return true; } // Checks if the TF shape for both tensors is the same or not // Returns: true if TF shapes for both are the same, false otherwise inline bool MklCompareShapes(const MklShape* input_shape_0, const TensorShape* input_shape_1) { // Check for number of dimensions if (input_shape_0->GetDimension() != input_shape_1->dims()) { return false; } // Check size of each dimension size_t ndims = input_shape_0->GetDimension(); for (size_t i = 0; i < ndims; i++) { if (input_shape_0->tf_dim_size(i) != input_shape_1->dim_size(i)) { return false; } } return true; } // Checks if the TF shape for both tensors is the same or not // Returns: true if TF shapes for both are the same, false otherwise inline bool MklCompareShapes(const TensorShape* input_shape_0, const MklShape* input_shape_1) { return MklCompareShapes(input_shape_1, input_shape_0); } // Checks if the TF shape for both tensors is the same or not // Returns: true if TF shapes for both are the same, false otherwise inline bool MklCompareShapes(const TensorShape* input_shape_0, const TensorShape* input_shape_1) { // Check for number of dimensions if (input_shape_0->dims() != input_shape_1->dims()) { return false; } // Check size of each dimension size_t ndims = input_shape_0->dims(); for (size_t i = 0; i < ndims; i++) { if (input_shape_0->dim_size(i) != input_shape_1->dim_size(i)) { return false; } } return true; } // These functions do not compile with MKL-DNN since mkl.h is missing. // We may need to remove them later. // TODO(intel_tf): Remove this routine when faster MKL layout conversion is // out. inline void MklNHWCToNCHW(const Tensor& input, Tensor** output) { const float* buf_in = input.flat<float>().data(); float* buf_out = (*output)->flat<float>().data(); int64 N = input.dim_size(0); int64 H = input.dim_size(1); int64 W = input.dim_size(2); int64 C = input.dim_size(3); int64 stride_n = H * W * C; #pragma omp parallel for num_threads(16) for (int64 n = 0; n < N; ++n) { mkl_somatcopy('R', 'T', H * W, C, 1, buf_in + n * stride_n, C, buf_out + n * stride_n, H * W); } } inline void MklNCHWToNHWC(const Tensor& input, Tensor** output) { const float* buf_in = input.flat<float>().data(); float* buf_out = (*output)->flat<float>().data(); int64 N = (*output)->dim_size(0); int64 H = (*output)->dim_size(1); int64 W = (*output)->dim_size(2); int64 C = (*output)->dim_size(3); int64 stride_n = H * W * C; #pragma omp parallel for num_threads(16) for (int64 n = 0; n < N; ++n) { mkl_somatcopy('R', 'T', C, H * W, 1, buf_in + n * stride_n, H * W, buf_out + n * stride_n, C); } } #endif // ------------------------------------------------------------------- #ifndef INTEL_MKL_ML /// Return MKL-DNN data type (memory::data_type) for input type T /// /// @input None /// @return memory::data_type corresponding to type T template <typename T> static memory::data_type MklDnnType(); /// Instantiation for float type. Add similar instantiations for other /// type if needed. 
template <>
memory::data_type MklDnnType<float>() {
  return memory::data_type::f32;
}

/// Map TensorFlow's data format into MKL-DNN data format
///
/// @input: TensorFlow data format
/// @return: memory::format corresponding to TensorFlow data format;
///          Fails with an error if invalid data format.
inline memory::format TFDataFormatToMklDnnDataFormat(TensorFormat format) {
  if (format == FORMAT_NHWC)
    return memory::format::nhwc;
  else if (format == FORMAT_NCHW)
    return memory::format::nchw;
  TF_CHECK_OK(Status(error::Code::INVALID_ARGUMENT, "Unsupported data format"));
  // Return to get rid of compiler warning
  return memory::format::format_undef;
}

/// Map MKL-DNN data format to TensorFlow's data format
///
/// @input: memory::format
/// @return: Tensorflow data format corresponding to memory::format
///          Fails with an error if invalid data format.
inline TensorFormat MklDnnDataFormatToTFDataFormat(memory::format format) {
  if (format == memory::format::nhwc)
    return FORMAT_NHWC;
  else if (format == memory::format::nchw)
    return FORMAT_NCHW;
  TF_CHECK_OK(Status(error::Code::INVALID_ARGUMENT, "Unsupported data format"));
  // Return to prevent compiler warnings, otherwise TF_CHECK_OK will ensure
  // that we don't come here.
  return FORMAT_NHWC;
}

/// Map TensorShape object into memory::dims required by MKL-DNN
///
/// This function will simply map input TensorShape into MKL-DNN dims
/// naively. So it will preserve the order of dimensions. E.g., if
/// input tensor is in NHWC format, then dims will be in NHWC format
/// also.
///
/// @input TensorShape object in shape
/// @return memory::dims corresponding to TensorShape
inline memory::dims TFShapeToMklDnnDims(const TensorShape& shape) {
  memory::dims dims(shape.dims());
  for (int d = 0; d < shape.dims(); ++d) {
    dims[d] = shape.dim_size(d);
  }
  return dims;
}

/// Map TensorShape object into memory::dims in NCHW format required by MKL-DNN
///
/// This function is more specific than the one above: it maps the input
/// TensorShape into MKL-DNN dims in NCHW format. So it may not preserve the
/// order of dimensions. E.g., if the input tensor is in NHWC format, then dims
/// will be in NCHW format, and not in NHWC format.
///
/// @input TensorShape object in shape
/// @return memory::dims in MKL-DNN required NCHW format
inline memory::dims TFShapeToMklDnnDimsInNCHW(const TensorShape& shape,
                                              TensorFormat format) {
  // Check validity of format.
  CHECK_NE(TFDataFormatToMklDnnDataFormat(format),
           memory::format::format_undef);

  int n = shape.dim_size(GetTensorDimIndex(format, 'N'));
  int c = shape.dim_size(GetTensorDimIndex(format, 'C'));
  int h = shape.dim_size(GetTensorDimIndex(format, 'H'));
  int w = shape.dim_size(GetTensorDimIndex(format, 'W'));

  // MKL-DNN requires dimensions in NCHW format.
  return memory::dims({n, c, h, w});
}

/// Overloaded version of function above. Input parameters are
/// self-explanatory.
inline memory::dims MklDnnDimsInNCHW(const memory::dims& in_dims,
                                     TensorFormat format) {
  // Check validity of format.
  CHECK_NE(TFDataFormatToMklDnnDataFormat(format),
           memory::format::format_undef);

  int n = in_dims[GetTensorDimIndex(format, 'N')];
  int c = in_dims[GetTensorDimIndex(format, 'C')];
  int h = in_dims[GetTensorDimIndex(format, 'H')];
  int w = in_dims[GetTensorDimIndex(format, 'W')];

  // MKL-DNN requires dimensions in NCHW format.
  return memory::dims({n, c, h, w});
}
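// Example (a sketch added for illustration; not part of the original header):
// converting an NHWC TensorFlow shape into the NCHW dims that MKL-DNN
// expects. The concrete dimension values are assumptions.
//
//   TensorShape shape({8, 28, 28, 3});   // N=8, H=28, W=28, C=3 in NHWC
//   memory::dims nchw = TFShapeToMklDnnDimsInNCHW(shape, FORMAT_NHWC);
//   // nchw now holds {8, 3, 28, 28}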
/// Map MklDnn memory::dims object into TensorShape object.
///
/// This function will simply map input shape in MKL-DNN memory::dims format
/// into Tensorflow's TensorShape object by preserving dimension order.
///
/// @input MKL-DNN memory::dims object
/// @output TensorShape corresponding to memory::dims
inline TensorShape MklDnnDimsToTFShape(const memory::dims& dims) {
  std::vector<int32> shape(dims.size(), -1);
  for (int d = 0; d < dims.size(); d++) {
    shape[d] = dims[d];
  }

  TensorShape ret;
  CHECK_EQ(TensorShapeUtils::MakeShape(shape, &ret).ok(), true);
  return ret;
}

/// Function to calculate strides given tensor shape in Tensorflow order
/// E.g., if dims_tf_order is {1, 2, 3, 4}, then as per Tensorflow convention,
/// dimension with size 1 is outermost dimension; while dimension with size 4
/// is innermost dimension. So strides for this tensor would be {4 * 3 * 2,
/// 4 * 3, 4, 1}, i.e., {24, 12, 4, 1}.
///
/// @input Tensorflow shape in memory::dims type
/// @return memory::dims containing strides for the tensor.
inline memory::dims CalculateTFStrides(const memory::dims& dims_tf_order) {
  CHECK_GT(dims_tf_order.size(), 0);
  memory::dims strides(dims_tf_order.size());
  int last_dim_idx = dims_tf_order.size() - 1;
  strides[last_dim_idx] = 1;
  for (int d = last_dim_idx - 1; d >= 0; d--) {
    strides[d] = strides[d + 1] * dims_tf_order[d + 1];
  }
  return strides;
}

inline padding_kind TFPaddingToMklDnnPadding(Padding pad) {
  // MKL-DNN only supports zero padding.
  return padding_kind::zero;
}

/// Helper function to create memory descriptor in Blocked format
///
/// @input: Tensor dimensions
/// @input: strides corresponding to dimensions. One can use utility
///         function such as CalculateTFStrides to compute strides
///         for given dimensions.
/// @return: memory::desc object corresponding to blocked memory format
///          for given dimensions and strides.
inline memory::desc CreateBlockedMemDescHelper(const memory::dims& dim,
                                               const memory::dims& strides,
                                               memory::data_type dtype) {
  CHECK_EQ(dim.size(), strides.size());

  // We have to construct memory descriptor in a C style. This is not at all
  // ideal but MKLDNN does not offer any API to construct descriptor in
  // blocked format except a copy constructor that accepts
  // mkldnn_memory_desc_t.
  mkldnn_memory_desc_t md;
  md.primitive_kind = mkldnn_memory;
  md.ndims = dim.size();
  md.format = mkldnn_blocked;
  md.data_type = memory::convert_to_c(dtype);

  for (size_t i = 0; i < dim.size(); i++) {
    md.layout_desc.blocking.block_dims[i] = 1;
    md.layout_desc.blocking.strides[1][i] = 1;
    md.layout_desc.blocking.strides[0][i] = strides[i];
    md.layout_desc.blocking.padding_dims[i] = dim[i];
    md.layout_desc.blocking.offset_padding_to_data[i] = 0;
    md.dims[i] = dim[i];
  }
  md.layout_desc.blocking.offset_padding = 0;

  return memory::desc(md);
}

template <typename T>
inline primitive FindOrCreateReorder(const memory* from, const memory* to);

/*
 * Class to represent all the resources corresponding to a tensor in TensorFlow
 * that are required to execute an operation (such as Convolution).
 */
template <typename T>
class MklDnnData {
 private:
  /// MKL-DNN memory primitive for input user memory
  memory* user_memory_;

  /// MKL-DNN memory primitive in case input or output reorder is needed.
  memory* reorder_memory_;

  /// Operations memory descriptor
  memory::desc* op_md_;

  /// CPU engine on which operation will be executed
  const engine* cpu_engine_;

 public:
  explicit MklDnnData(const engine* e)
      : user_memory_(nullptr),
        reorder_memory_(nullptr),
        op_md_(nullptr),
        cpu_engine_(e) {}

  ~MklDnnData() {
    cpu_engine_ = nullptr;  // We don't own this.
    delete (user_memory_);
    delete (reorder_memory_);
    delete (op_md_);
  }

  inline void* GetTensorBuffer(const Tensor* tensor) const {
    CHECK_NOTNULL(tensor);
    return const_cast<void*>(
        static_cast<const void*>(tensor->flat<T>().data()));
  }

  /// Set user memory primitive using specified dimensions, memory format and
  /// data_buffer. Function automatically uses element data type by using
  /// input type T used for creating call object.
  ///
  /// In a nutshell, function allows user to describe the input tensor to
  /// an operation. E.g., filter of Conv2D is of shape {1, 2, 3, 4}, and
  /// memory format HWIO, and the buffer that contains actual values is
  /// pointed by data_buffer.
  inline void SetUsrMem(const memory::dims& dim, memory::format fm,
                        void* data_buffer = nullptr) {
    auto md = memory::desc(dim, MklDnnType<T>(), fm);
    SetUsrMem(md, data_buffer);
  }

  inline void SetUsrMem(const memory::dims& dim, memory::format fm,
                        const Tensor* tensor) {
    CHECK_NOTNULL(tensor);
    SetUsrMem(dim, fm, GetTensorBuffer(tensor));
  }

  /// Helper function to create memory descriptor in Blocked format
  ///
  /// @input: Tensor dimensions
  /// @input: strides corresponding to dimensions. One can use utility
  ///         function such as CalculateTFStrides to compute strides
  ///         for given dimensions.
  /// @return: memory::desc object corresponding to blocked memory format
  ///          for given dimensions and strides.
  static inline memory::desc CreateBlockedMemDesc(
      const memory::dims& dim, const memory::dims& strides) {
    return CreateBlockedMemDescHelper(dim, strides, MklDnnType<T>());
  }

  /// A version of SetUsrMem call that allows user to create memory in blocked
  /// format. So in addition to accepting dimensions, it also accepts strides.
  /// This allows user to create memory for tensor in a format that is not
  /// supported by MKLDNN. E.g., MKLDNN does not support tensor format for 6
  /// dimensional tensor as a native format. But by using blocked format, a
  /// user can create memory for a 6D tensor.
  inline void SetUsrMem(const memory::dims& dim, const memory::dims& strides,
                        void* data_buffer = nullptr) {
    CHECK_EQ(dim.size(), strides.size());
    auto blocked_md = MklDnnData<T>::CreateBlockedMemDesc(dim, strides);
    SetUsrMem(blocked_md, data_buffer);
  }

  inline void SetUsrMem(const memory::dims& dim, const memory::dims& strides,
                        const Tensor* tensor) {
    CHECK_NOTNULL(tensor);
    SetUsrMem(dim, strides, GetTensorBuffer(tensor));
  }

  /// A version of function to set user memory primitive that accepts memory
  /// descriptor directly, instead of accepting dimensions and format. This
  /// function is more generic than the one above, but the function above is
  /// sufficient in most cases.
  inline void SetUsrMem(const memory::desc& md, void* data_buffer = nullptr) {
    auto pd = memory::primitive_desc(md, *cpu_engine_);
    SetUsrMem(pd, data_buffer);
  }

  /// A version of SetUsrMem with memory descriptor and tensor
  inline void SetUsrMem(const memory::desc& md, const Tensor* tensor) {
    CHECK_NOTNULL(tensor);
    SetUsrMem(md, GetTensorBuffer(tensor));
  }
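  // Example (a sketch added for illustration; not part of the original
  // header): describing a user tensor to MKL-DNN with the SetUsrMem overloads
  // above. 'cpu_engine', 'input_tensor', and the dimension values are
  // assumptions.
  //
  //   MklDnnData<float> src(&cpu_engine);
  //   memory::dims src_dims = {8, 3, 28, 28};              // NCHW order
  //   src.SetUsrMem(src_dims, memory::format::nchw, &input_tensor);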
  /// A version of function to set user memory primitive that accepts primitive
  /// descriptor directly, instead of accepting dimensions and format. This
  /// function is more generic than the one above, but the function above is
  /// sufficient in most cases.
  inline void SetUsrMem(const memory::primitive_desc& pd,
                        void* data_buffer = nullptr) {
    CHECK_NOTNULL(cpu_engine_);
    // TODO(nhasabni): can we remove dynamic memory allocation?
    if (data_buffer) {
      user_memory_ = new memory(pd, data_buffer);
    } else {
      user_memory_ = new memory(pd);
    }
  }

  /// A version of SetUsrMem with primitive descriptor and tensor
  inline void SetUsrMem(const memory::primitive_desc& pd,
                        const Tensor* tensor) {
    CHECK_NOTNULL(tensor);
    SetUsrMem(pd, GetTensorBuffer(tensor));
  }

  /// Get function for user memory primitive.
  inline const memory* GetUsrMem() const { return user_memory_; }

  /// Get function for primitive descriptor of user memory primitive.
  inline const memory::primitive_desc GetUsrMemPrimDesc() const {
    CHECK_NOTNULL(user_memory_);
    return user_memory_->get_primitive_desc();
  }

  /// Get function for descriptor of user memory.
  inline memory::desc GetUsrMemDesc() {
    // This is ugly. Why does MKL-DNN not provide a desc() method for const
    // type?
    const memory::primitive_desc pd = GetUsrMemPrimDesc();
    return const_cast<memory::primitive_desc*>(&pd)->desc();
  }

  /// Get function for data buffer of user memory primitive.
  inline void* GetUsrMemDataHandle() const {
    CHECK_NOTNULL(user_memory_);
    return user_memory_->get_data_handle();
  }

  /// Set function for data buffer of user memory primitive.
  inline void SetUsrMemDataHandle(void* data_buffer) {
    CHECK_NOTNULL(user_memory_);
    CHECK_NOTNULL(data_buffer);
    user_memory_->set_data_handle(data_buffer);
  }

  /// Set function for data buffer of user memory primitive.
  inline void SetUsrMemDataHandle(const Tensor* tensor) {
    CHECK_NOTNULL(user_memory_);
    CHECK_NOTNULL(tensor);
    user_memory_->set_data_handle(GetTensorBuffer(tensor));
  }

  /// Get the memory primitive for input and output of an op. If inputs
  /// to an op require reorders, then this function returns memory primitive
  /// for reorder. Otherwise, it will return memory primitive for user memory.
  ///
  /// E.g., Conv2D(I, F) is a primitive with I and F being inputs. Then to
  /// execute Conv2D, we need memory primitive for I and F. But if reorder is
  /// required for I and F (say I_r is reorder primitive for I; F_r is reorder
  /// primitive for F), then we need I_r and F_r to perform Conv2D.
  inline const memory& GetOpMem() const {
    return reorder_memory_ ? *reorder_memory_ : *user_memory_;
  }

  /// Set memory descriptor of an operation in terms of dimensions and memory
  /// format. E.g., For Conv2D, the dimensions would be same as user dimensions
  /// but memory::format would be mkldnn::any because we want MKL-DNN to choose
  /// best layout/format for given input dimensions.
  inline void SetOpMemDesc(const memory::dims& dim, memory::format fm) {
    // TODO(nhasabni): can we remove dynamic memory allocation?
    op_md_ = new memory::desc(dim, MklDnnType<T>(), fm);
  }

  /// Get function for memory descriptor for an operation
  inline const memory::desc& GetOpMemDesc() const { return *op_md_; }

  /// Predicate that checks if we need to reorder user's memory into memory
  /// pointed by op_pd.
  ///
  /// @input: op_pd - memory primitive descriptor of the given input of an
  ///         operation
  /// @return: true in case reorder of input is needed; false, otherwise.
  inline bool IsReorderNeeded(const memory::primitive_desc& op_pd) const {
    CHECK_NOTNULL(user_memory_);
    return op_pd != user_memory_->get_primitive_desc();
  }
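  // Typical input-side flow (a sketch added for illustration; not part of
  // the original header), continuing the 'src' example above and using
  // CheckReorderToOpMem() defined below. 'op_pd' is assumed to come from the
  // op's primitive descriptor.
  //
  //   std::vector<primitive> net;
  //   src.CheckReorderToOpMem(op_pd, &net);   // adds a reorder only if needed
  //   // ... create the op primitive using src.GetOpMem() as its input ...
  //   stream(stream::kind::eager).submit(net).wait();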
  /// Predicate that checks if we need to reorder user's memory into memory
  /// based on the provided format.
  ///
  /// @input: target_format - memory format of the given input of an
  ///         operation
  /// @return: true in case reorder of input is needed; false, otherwise.
  inline bool IsReorderNeeded(const memory::format& target_format) const {
    CHECK_NOTNULL(user_memory_);
    return target_format !=
           user_memory_->get_primitive_desc().desc().data.format;
  }

  /// Function to create a reorder from memory pointed by from to memory
  /// pointed by to. Returns created primitive.
  inline primitive CreateReorder(const memory* from, const memory* to) const {
    CHECK_NOTNULL(from);
    CHECK_NOTNULL(to);
    return reorder(*from, *to);
  }

  /// Function to handle input reordering
  ///
  /// Check if we need to reorder this input of an operation.
  /// Return true and allocate reorder memory primitive if reorder is needed.
  /// Otherwise, return false and do not allocate reorder memory primitive.
  ///
  /// To check if reorder is needed, this function compares memory primitive
  /// descriptor of an operation (op_pd) for the given input with the
  /// user-specified memory primitive descriptor.
  ///
  /// @input: op_pd - memory primitive descriptor of the given input of an
  ///         operation
  /// @input: net - net to which to add reorder primitive in case it is needed.
  /// @return: true in case reorder of input is needed; false, otherwise.
  inline bool CheckReorderToOpMem(const memory::primitive_desc& op_pd,
                                  std::vector<primitive>* net) {
    CHECK_NOTNULL(net);
    CHECK_NOTNULL(user_memory_);
    if (IsReorderNeeded(op_pd)) {
      // TODO(nhasabni): can we remove dynamic memory allocation?
      reorder_memory_ = new memory(op_pd);
      net->push_back(CreateReorder(user_memory_, reorder_memory_));
      return true;
    }
    return false;
  }

  /// TODO: this is a faster path with reorder primitive cache compared with
  /// CheckReorderToOpMem(..., std::vector<primitive>* net); the slow path
  /// will be removed in the future.
  inline bool CheckReorderToOpMem(const memory::primitive_desc& op_pd) {
    CHECK_NOTNULL(user_memory_);
    if (IsReorderNeeded(op_pd)) {
      // TODO(nhasabni): can we remove dynamic memory allocation?
      // Primitive reuse does not allow two identical reorder primitives in
      // one stream, so submit the reorder immediately.
      reorder_memory_ = new memory(op_pd);
      std::vector<primitive> net;
      net.push_back(FindOrCreateReorder<T>(user_memory_, reorder_memory_));
      stream(stream::kind::eager).submit(net).wait();
      return true;
    }
    return false;
  }

  /// Overloaded version of above function that accepts memory buffer
  /// where output of reorder needs to be stored.
  ///
  /// @input: op_pd - memory primitive descriptor of the given input of an
  ///         operation
  /// @reorder_data_handle - memory buffer where output of reorder needs to be
  ///                        stored. The primitive does not check whether the
  ///                        buffer is of sufficient size.
  /// @input: net - net to which to add reorder primitive in case it is needed.
  /// @return: true in case reorder of input is needed; false, otherwise.
  inline bool CheckReorderToOpMem(const memory::primitive_desc& op_pd,
                                  void* reorder_data_handle,
                                  std::vector<primitive>* net) {
    CHECK_NOTNULL(net);
    CHECK_NOTNULL(reorder_data_handle);
    CHECK_NOTNULL(user_memory_);
    if (IsReorderNeeded(op_pd)) {
      // TODO(nhasabni): can we remove dynamic memory allocation?
      reorder_memory_ = new memory(op_pd, reorder_data_handle);
      net->push_back(CreateReorder(user_memory_, reorder_memory_));
      return true;
    }
    return false;
  }

  /// TODO: this is a faster path with reorder primitive cache compared with
  /// CheckReorderToOpMem(..., std::vector<primitive>* net); the slow path
  /// will be removed in the future.
  inline bool CheckReorderToOpMem(const memory::primitive_desc& op_pd,
                                  void* reorder_data_handle) {
    CHECK_NOTNULL(reorder_data_handle);
    CHECK_NOTNULL(user_memory_);
    if (IsReorderNeeded(op_pd)) {
      // TODO(nhasabni): can we remove dynamic memory allocation?
      // Primitive reuse does not allow two identical reorder primitives in
      // one stream, so submit the reorder immediately.
      std::vector<primitive> net;
      reorder_memory_ = new memory(op_pd, reorder_data_handle);
      net.push_back(FindOrCreateReorder<T>(user_memory_, reorder_memory_));
      stream(stream::kind::eager).submit(net).wait();
      return true;
    }
    return false;
  }

  /// Another overloaded version of CheckReorderToOpMem that accepts Tensor
  /// where output of reorder needs to be stored.
  ///
  /// @input: op_pd - memory primitive descriptor of the given input of an
  ///         operation
  /// @reorder_tensor - Tensor whose buffer is to be used to store output of
  ///                   reorder. The primitive does not check whether the
  ///                   buffer is of sufficient size.
  /// @input: net - net to which to add reorder primitive in case it is needed.
  /// @return: true in case reorder of input is needed; false, otherwise.
  inline bool CheckReorderToOpMem(const memory::primitive_desc& op_pd,
                                  Tensor* reorder_tensor,
                                  std::vector<primitive>* net) {
    CHECK_NOTNULL(net);
    CHECK_NOTNULL(reorder_tensor);
    return CheckReorderToOpMem(op_pd, GetTensorBuffer(reorder_tensor), net);
  }

  /// TODO: this is a faster path with reorder primitive cache compared with
  /// CheckReorderToOpMem(..., std::vector<primitive>* net); the slow path
  /// will be removed in the future.
  inline bool CheckReorderToOpMem(const memory::primitive_desc& op_pd,
                                  Tensor* reorder_tensor) {
    CHECK_NOTNULL(reorder_tensor);
    return CheckReorderToOpMem(op_pd, GetTensorBuffer(reorder_tensor));
  }

  /// Function to handle output reorder
  ///
  /// This function performs very similar functionality as input reordering
  /// function above. The only difference is that this function does not add
  /// reorder primitive to the net. The reason for this is: the reorder
  /// primitive for output needs to be added to the list only after operation
  /// has executed. But we need to prepare a temporary buffer in case output
  /// reorder is needed. And this temporary buffer will hold the output of
  /// an operation before it is fed to reorder primitive.
  ///
  /// @input memory primitive descriptor for the given output of an operation
  /// @return: true in case reorder of output is needed; false, otherwise.
  inline bool PrepareReorderToUserMemIfReq(
      const memory::primitive_desc& op_pd) {
    CHECK_NOTNULL(user_memory_);
    if (IsReorderNeeded(op_pd)) {
      // TODO(nhasabni): can we remove dynamic memory allocation?
      reorder_memory_ = new memory(op_pd);
      return true;
    }
    return false;
  }
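  // Typical output-side flow (a sketch added for illustration; not part of
  // the original header), pairing PrepareReorderToUserMemIfReq() above with
  // InsertReorderToUserMem() defined below. 'dst', 'user_out_md', and
  // 'output_tensor' are assumptions.
  //
  //   MklDnnData<float> dst(&cpu_engine);
  //   dst.SetUsrMem(user_out_md, output_tensor);
  //   bool need_reorder = dst.PrepareReorderToUserMemIfReq(op_pd);
  //   // ... run the op, writing into dst.GetOpMem() ...
  //   if (need_reorder) dst.InsertReorderToUserMem(&net);
  //   stream(stream::kind::eager).submit(net).wait();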
  /// Function to actually insert reorder primitive in the net
  ///
  /// This function completes remaining part of output reordering. It inserts
  /// a reordering primitive from the temporary buffer that holds the output
  /// to the user-specified output buffer.
  ///
  /// @input: net - net to which to add reorder primitive
  inline void InsertReorderToUserMem(std::vector<primitive>* net) {
    CHECK_NOTNULL(net);
    CHECK_NOTNULL(user_memory_);
    CHECK_NOTNULL(reorder_memory_);
    net->push_back(CreateReorder(reorder_memory_, user_memory_));
  }

  /// TODO: this is a faster path with reorder primitive cache compared with
  /// InsertReorderToUserMem(std::vector<primitive>* net); the slow path
  /// will be removed in the future.
  inline void InsertReorderToUserMem() {
    CHECK_NOTNULL(user_memory_);
    CHECK_NOTNULL(reorder_memory_);
    // Primitive reuse does not allow two identical reorder primitives in
    // one stream, so submit the reorder immediately.
    std::vector<primitive> net;
    net.push_back(FindOrCreateReorder<T>(reorder_memory_, user_memory_));
    stream(stream::kind::eager).submit(net).wait();
  }
};

/// Base class for operations with reuse of primitives
class MklPrimitive {
 public:
  virtual ~MklPrimitive() {}

  // Dummy data which MKL DNN never operates on
  unsigned char* DummyData = nullptr;
};

const mkldnn::memory::dims NONE_DIMS = {};

template <typename T>
class MklPrimitiveFactory {
 public:
  MklPrimitiveFactory() {}
  ~MklPrimitiveFactory() {}

  MklPrimitive* GetOp(const string& key) {
    auto& map = MklPrimitiveFactory<T>::GetHashMap();
    auto stream_iter = map.find(key);
    if (stream_iter == map.end()) {
      return nullptr;
    } else {
      CHECK(stream_iter->second != nullptr) << "nullptr present in map";
      return stream_iter->second;
    }
  }

  void SetOp(const string& key, MklPrimitive* op) {
    auto& map = MklPrimitiveFactory<T>::GetHashMap();
    auto stream_iter = map.find(key);
    CHECK(stream_iter == map.end());
    map[key] = op;
  }

 private:
  static inline std::unordered_map<string, MklPrimitive*>& GetHashMap() {
    static thread_local std::unordered_map<string, MklPrimitive*> map_;
    return map_;
  }
};
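// Typical caching pattern (a sketch added for illustration; not part of the
// original header). The key contents and the creation helper are assumptions.
//
//   MklPrimitiveFactory<float> factory;
//   string key = ...;                      // built with FactoryKeyCreator below
//   MklPrimitive* prim = factory.GetOp(key);
//   if (prim == nullptr) {
//     prim = CreateMyPrimitive();          // hypothetical creation helper
//     factory.SetOp(key, prim);
//   }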
// Utility class for creating keys of MKL primitive pool.
class FactoryKeyCreator {
 public:
  FactoryKeyCreator() { key_.reserve(kMaxKeyLength); }

  ~FactoryKeyCreator() {}

  void AddAsKey(const string& str) { Append(str); }

  void AddAsKey(const mkldnn::memory::dims& dims) {
    for (unsigned int i = 0; i < dims.size(); i++) {
      AddAsKey<int>(dims[i]);
    }
  }

  template <typename T>
  void AddAsKey(const T data) {
    auto buffer = reinterpret_cast<const char*>(&data);
    Append(StringPiece(buffer, sizeof(T)));
  }

  string GetKey() { return key_; }

 private:
  string key_;
  const char delimiter = 'x';
  const int kMaxKeyLength = 256;
  void Append(StringPiece s) {
    key_.append(s.ToString());
    key_.append(1, delimiter);
  }
};

class MklReorderPrimitive : public MklPrimitive {
 public:
  explicit MklReorderPrimitive(const memory* from, const memory* to) {
    Setup(from, to);
  }
  ~MklReorderPrimitive() {}

  std::shared_ptr<primitive> GetPrimitive() { return context_.reorder_prim; }

  void SetMemory(const memory* from, const memory* to) {
    context_.src_mem->set_data_handle(from->get_data_handle());
    context_.dst_mem->set_data_handle(to->get_data_handle());
  }

 private:
  struct ReorderContext {
    std::shared_ptr<mkldnn::memory> src_mem;
    std::shared_ptr<mkldnn::memory> dst_mem;
    std::shared_ptr<primitive> reorder_prim;
    ReorderContext()
        : src_mem(nullptr), dst_mem(nullptr), reorder_prim(nullptr) {}
  } context_;

  engine cpu_engine_ = engine(engine::cpu, 0);

  void Setup(const memory* from, const memory* to) {
    context_.src_mem.reset(
        new memory({from->get_primitive_desc().desc(), cpu_engine_},
                   DummyData));
    context_.dst_mem.reset(
        new memory({to->get_primitive_desc().desc(), cpu_engine_}, DummyData));
    context_.reorder_prim = std::make_shared<mkldnn::reorder>(
        reorder(*context_.src_mem, *context_.dst_mem));
  }
};

template <typename T>
class MklReorderPrimitiveFactory : public MklPrimitiveFactory<T> {
 public:
  static MklReorderPrimitive* Get(const memory* from, const memory* to) {
    auto reorderPrim = static_cast<MklReorderPrimitive*>(
        MklReorderPrimitiveFactory<T>::GetInstance().GetReorder(from, to));
    if (reorderPrim == nullptr) {
      reorderPrim = new MklReorderPrimitive(from, to);
      MklReorderPrimitiveFactory<T>::GetInstance().SetReorder(from, to,
                                                              reorderPrim);
    }
    reorderPrim->SetMemory(from, to);
    return reorderPrim;
  }

  static MklReorderPrimitiveFactory& GetInstance() {
    static MklReorderPrimitiveFactory instance_;
    return instance_;
  }

 private:
  MklReorderPrimitiveFactory() {}
  ~MklReorderPrimitiveFactory() {}

  static string CreateKey(const memory* from, const memory* to) {
    string prefix = "reorder";
    FactoryKeyCreator key_creator;
    auto const& from_desc = from->get_primitive_desc().desc().data;
    auto const& to_desc = to->get_primitive_desc().desc().data;
    memory::dims from_dims(from_desc.dims, &from_desc.dims[from_desc.ndims]);
    memory::dims to_dims(to_desc.dims, &to_desc.dims[to_desc.ndims]);
    key_creator.AddAsKey(prefix);
    key_creator.AddAsKey(static_cast<int>(from_desc.format));
    key_creator.AddAsKey(static_cast<int>(from_desc.data_type));
    key_creator.AddAsKey(from_dims);
    key_creator.AddAsKey(static_cast<int>(to_desc.format));
    key_creator.AddAsKey(static_cast<int>(to_desc.data_type));
    key_creator.AddAsKey(to_dims);
    return key_creator.GetKey();
  }

  MklPrimitive* GetReorder(const memory* from, const memory* to) {
    string key = CreateKey(from, to);
    return this->GetOp(key);
  }

  void SetReorder(const memory* from, const memory* to, MklPrimitive* op) {
    string key = CreateKey(from, to);
    this->SetOp(key, op);
  }
};

/// Function to find (or create) a reorder from the memory pointed to by
/// 'from' to the memory pointed to by 'to'; it creates the primitive or
/// fetches it from the pool if it is cached.
/// Returns the primitive.
template <typename T>
inline primitive FindOrCreateReorder(const memory* from, const memory* to) {
  CHECK_NOTNULL(from);
  CHECK_NOTNULL(to);
  MklReorderPrimitive* reorder_prim =
      MklReorderPrimitiveFactory<T>::Get(from, to);
  return *reorder_prim->GetPrimitive();
}

#endif  // !INTEL_MKL_ML

}  // namespace tensorflow

#endif  // TENSORFLOW_CORE_UTIL_MKL_UTIL_H_
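// Usage sketch appended for illustration (not part of the original header):
// building a cache key with FactoryKeyCreator, as MklReorderPrimitiveFactory
// does above. The concrete values are assumptions.
//
//   FactoryKeyCreator key_creator;
//   key_creator.AddAsKey(string("reorder"));
//   key_creator.AddAsKey(memory::dims({8, 3, 28, 28}));
//   string key = key_creator.GetKey();   // fields separated by delimiter 'x'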
GB_binop__max_fp64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__max_fp64) // A.*B function (eWiseMult): GB (_AemultB) // A.*B function (eWiseMult): GB (_AemultB_02__max_fp64) // A.*B function (eWiseMult): GB (_AemultB_03__max_fp64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__max_fp64) // A*D function (colscale): GB (_AxD__max_fp64) // D*A function (rowscale): GB (_DxB__max_fp64) // C+=B function (dense accum): GB (_Cdense_accumB__max_fp64) // C+=b function (dense accum): GB (_Cdense_accumb__max_fp64) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__max_fp64) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__max_fp64) // C=scalar+B GB (_bind1st__max_fp64) // C=scalar+B' GB (_bind1st_tran__max_fp64) // C=A+scalar GB (_bind2nd__max_fp64) // C=A'+scalar GB (_bind2nd_tran__max_fp64) // C type: double // A type: double // B,b type: double // BinaryOp: cij = fmax (aij, bij) #define GB_ATYPE \ double #define GB_BTYPE \ double #define GB_CTYPE \ double // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ double aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ double bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ double t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = fmax (x, y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MAX || GxB_NO_FP64 || GxB_NO_MAX_FP64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__max_fp64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__max_fp64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__max_fp64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__max_fp64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type double double bwork = (*((double *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__max_fp64) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *restrict Cx = (double *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__max_fp64) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *restrict Cx = (double *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__max_fp64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } 
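//------------------------------------------------------------------------------
// illustration (added comment; not part of the generated code)
//------------------------------------------------------------------------------

// A minimal sketch of how the macros defined above compose inside the
// templates, assuming a fully dense C=A+B with cnz entries; the real
// templates also handle sparsity, masks, and task scheduling:
//
//      for (int64_t p = 0 ; p < cnz ; p++)
//      {
//          double aij = Ax [p] ;           // GB_GETA
//          double bij = Bx [p] ;           // GB_GETB
//          Cx [p] = fmax (aij, bij) ;      // GB_BINOP
//      }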
//------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__max_fp64) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__max_fp64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__max_fp64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__max_fp64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__max_fp64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else 
double *Cx = (double *) Cx_output ; double x = (*((double *) x_input)) ; double *Bx = (double *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; double bij = Bx [p] ; Cx [p] = fmax (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__max_fp64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; double *Cx = (double *) Cx_output ; double *Ax = (double *) Ax_input ; double y = (*((double *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; double aij = Ax [p] ; Cx [p] = fmax (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ double aij = Ax [pA] ; \ Cx [pC] = fmax (x, aij) ; \ } GrB_Info GB (_bind1st_tran__max_fp64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ double #if GB_DISABLE return (GrB_NO_VALUE) ; #else double x = (*((const double *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ double } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ double aij = Ax [pA] ; \ Cx [pC] = fmax (aij, y) ; \ } GrB_Info GB (_bind2nd_tran__max_fp64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double y = (*((const double *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
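//------------------------------------------------------------------------------
// usage note (added comment; not part of the generated code)
//------------------------------------------------------------------------------

// A sketch of the user-level call that dispatches to GB (_AaddB__max_fp64)
// above, assuming the standard built-in GrB_MAX_FP64 binary operator:
//
//      GrB_Matrix C, A, B ;
//      // ... create and fill A and B, create C ...
//      GrB_Matrix_eWiseAdd_BinaryOp (C, NULL, NULL, GrB_MAX_FP64,
//          A, B, NULL) ;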
LAGraph_grread.c
//------------------------------------------------------------------------------
// LAGraph_grread: read a matrix from a binary file
//------------------------------------------------------------------------------

/*
    LAGraph:  graph algorithms based on GraphBLAS

    Copyright 2019 LAGraph Contributors.

    (see Contributors.txt for a full list of Contributors; see
    ContributionInstructions.txt for information on how you can Contribute to
    this project).

    All Rights Reserved.

    NO WARRANTY. THIS MATERIAL IS FURNISHED ON AN "AS-IS" BASIS. THE LAGRAPH
    CONTRIBUTORS MAKE NO WARRANTIES OF ANY KIND, EITHER EXPRESSED OR IMPLIED,
    AS TO ANY MATTER INCLUDING, BUT NOT LIMITED TO, WARRANTY OF FITNESS FOR
    PURPOSE OR MERCHANTABILITY, EXCLUSIVITY, OR RESULTS OBTAINED FROM USE OF
    THE MATERIAL. THE CONTRIBUTORS DO NOT MAKE ANY WARRANTY OF ANY KIND WITH
    RESPECT TO FREEDOM FROM PATENT, TRADEMARK, OR COPYRIGHT INFRINGEMENT.

    Released under a BSD license, please see the LICENSE file distributed with
    this Software or contact permission@sei.cmu.edu for full terms.

    Created, in part, with funding and support from the United States
    Government.  (see Acknowledgments.txt file).

    This program includes and/or can make use of certain third party source
    code, object code, documentation and other files ("Third Party Software").
    See LICENSE file for more details.

*/

//------------------------------------------------------------------------------

// LAGraph_grread: read a matrix from a binary file.

// Contributed by Tim Davis, Texas A&M, based on the Galois graph reader
// file format.

// The file format consists of a header, with the following content:

//      uint64_t version : TODO what is this?
//          This value is returned to the caller, but is otherwise unused.

//      uint64_t esize : the size of the edge weight, as sizeof (edgetype).
//          For example, if the file contains edge weights of type int32_t,
//          esize is sizeof (int32_t) == 4.  The caller must specify the
//          corresponding GrB_Type, and its size must match esize.

//      uint64_t n : the number of nodes in the graph.  The GrB_Matrix is
//          n-by-n.  Rectangular matrices are not supported by this format.

//      uint64_t e : the number of edges in the graph

// This header is followed by a matrix in CSR format:

//      Gp : an array of size ((n+1) * sizeof (uint64_t)) bytes, but Gp [0] = 0
//          does not appear in the file.  This section of the file is thus
//          (n * sizeof (uint64_t)) bytes in length.

//      Gj : an array of size (e * sizeof (int32_t)), containing the adjacency
//          lists.  Note that the indices are 32 bit, not 64 bit, and thus
//          this format is limited to graphs with n < 2^32.

//      Gx : an array of size (e * esize), containing the edge weights.

// LAGraph_grread returns its status: GrB_SUCCESS if successful,
// GrB_OUT_OF_MEMORY if out of memory, GrB_INVALID_VALUE if a file I/O error
// occurs or the edge size is not what was expected.

#include "LAGraph_internal.h"
#include <fcntl.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <unistd.h>

//------------------------------------------------------------------------------
// gr_header
//------------------------------------------------------------------------------

// The gr_header specifies the first 4 * sizeof(uint64_t) bytes of the file.

typedef struct
{
    uint64_t version ;      // TODO what is this?
uint64_t esize ; // sizeof (edgetype) uint64_t n ; // # of nodes in the graph uint64_t e ; // # of edges in the graph } gr_header ; //------------------------------------------------------------------------------ // LAGraph_binary_read //------------------------------------------------------------------------------ // Read a block of binary data from a file. Returns GrB_SUCCESS if successful, // GrB_INVALID_VALUE otherwise. static GrB_Info LAGraph_binary_read ( int fd, // file descriptor to read from void *buffer, // buffer of size nbytes to read into size_t nbytes // # of bytes to read ) { if (fd < 0) { fprintf (stderr, "LAGraph_grread: file I/O error\n") ; return (GrB_INVALID_VALUE) ; } if (read (fd, buffer, nbytes) != nbytes) { fprintf (stderr, "LAGraph_grread: file I/O error\n") ; return (GrB_INVALID_VALUE) ; } return (GrB_SUCCESS) ; } //------------------------------------------------------------------------------ // LAGRAPH_FREE_ALL //------------------------------------------------------------------------------ // Free all allocated space; used only for error return. #define LAGRAPH_FREE_ALL \ { \ GrB_free (G) ; \ LAGRAPH_FREE (Gp) ; \ LAGRAPH_FREE (Gj) ; \ LAGRAPH_FREE (Gj_32) ; \ LAGRAPH_FREE (Gx) ; \ if (fd >= 0) close (fd) ; \ } //------------------------------------------------------------------------------ // LAGraph_grread //------------------------------------------------------------------------------ GrB_Info LAGraph_grread // read a matrix from a binary file ( GrB_Matrix *G, // handle of matrix to create uint64_t *G_version, // the version in the file const char *filename, // name of file to open GrB_Type gtype // type of matrix to read, NULL if no edge weights // (in that case, G has type GrB_BOOL with all // edge weights equal to 1). ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- GrB_Info info ; GrB_Index *Gp = NULL ; int32_t *Gj_32 = NULL ; GrB_Index *Gj = NULL ; void *Gx = NULL ; int fd = -1 ; if (G == NULL || G_version == NULL || filename == NULL) { LAGRAPH_ERROR ("invalid input arguments", GrB_NULL_POINTER) ; } (*G) = NULL ; (*G_version) = 0 ; //-------------------------------------------------------------------------- // open the file //-------------------------------------------------------------------------- fd = open (filename, O_RDONLY) ; if (fd < 0) { fprintf (stderr, "LAGraph_grread: file not found: %s\n", filename) ; LAGRAPH_ERROR ("input file not found", GrB_INVALID_VALUE) ; } //-------------------------------------------------------------------------- // open the file and read the gr_header //-------------------------------------------------------------------------- gr_header header ; LAGRAPH_OK (LAGraph_binary_read (fd, &header, sizeof (gr_header))) ; (*G_version) = header.version ; // version, TODO: what is this? 
uint64_t esize = header.esize ; // sizeof (edge type) uint64_t n = header.n ; // # of nodes uint64_t e = header.e ; // # of edges size_t esize_expected = 0 ; if (gtype != NULL) { LAGRAPH_OK (GxB_Type_size (&esize_expected, gtype)) ; } if (esize != esize_expected) { fprintf (stderr, "LAGraph_grread: esize in file (%g) does not match" " gtype size (%g)\n", (double) esize, (double) esize_expected) ; LAGRAPH_ERROR ("unexpected edge size", GrB_INVALID_VALUE) ; } if (n > UINT32_MAX) { LAGRAPH_ERROR ("problem too large", GrB_INVALID_VALUE) ; } //-------------------------------------------------------------------------- // allocate and read in the pointers //-------------------------------------------------------------------------- Gp = LAGraph_malloc (n+1, sizeof (GrB_Index)) ; if (Gp == NULL) { LAGRAPH_ERROR ("out of memory", GrB_OUT_OF_MEMORY) ; } Gp [0] = 0 ; LAGRAPH_OK (LAGraph_binary_read (fd, Gp+1, n * sizeof (GrB_Index))) ; //-------------------------------------------------------------------------- // allocate and read in the indices //-------------------------------------------------------------------------- Gj = LAGraph_malloc (e, sizeof (GrB_Index)) ; Gj_32 = LAGraph_malloc (e, sizeof (int32_t)) ; if (Gj_32 == NULL || Gj == NULL) { LAGRAPH_ERROR ("out of memory", GrB_OUT_OF_MEMORY) ; } // indices are in 32-bit format in the file LAGRAPH_OK (LAGraph_binary_read (fd, Gj_32, e * sizeof (int32_t))) ; // convert to 64-bit #pragma omp parallel for schedule(static) for (GrB_Index p = 0 ; p < e ; p++) { Gj [p] = (GrB_Index) Gj_32 [p] ; } LAGRAPH_FREE (Gj_32) ; //-------------------------------------------------------------------------- // read in the values //-------------------------------------------------------------------------- bool no_edge_weights = (gtype == NULL) ; if (no_edge_weights) { // the input file has no edge weights gtype = GrB_BOOL ; esize = sizeof (bool) ; } Gx = LAGraph_malloc (e, esize) ; if (Gx == NULL) LAGRAPH_ERROR ("out of memory", GrB_OUT_OF_MEMORY) ; if (no_edge_weights) { // set all edge weights to boolean true bool *Gbool = (bool *) Gx ; #pragma omp parallel for schedule(static) for (GrB_Index p = 0 ; p < e ; p++) { Gbool [p] = true ; } } else { // read in the edge weights LAGRAPH_OK (LAGraph_binary_read (fd, Gx, e * esize)) ; } //-------------------------------------------------------------------------- // import the data into the GrB_Matrix //-------------------------------------------------------------------------- LAGRAPH_OK (GxB_Matrix_import_CSR (G, gtype, n, n, e, -1, &Gp, &Gj, &Gx, NULL)) ; //-------------------------------------------------------------------------- // close the file and return result //-------------------------------------------------------------------------- close (fd) ; return (GrB_SUCCESS) ; }
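A minimal driver sketching how LAGraph_grread might be called; the "graph.gr" path and the int32_t edge type are illustrative assumptions, and LAGraph_init/LAGraph_finalize are assumed to be the usual LAGraph setup and teardown entry points:

#include "LAGraph.h"
#include <stdio.h>

int main (void)
{
    GrB_Matrix A = NULL ;
    uint64_t version = 0 ;
    LAGraph_init ( ) ;
    // read a graph whose edge weights were written as int32_t (esize == 4)
    GrB_Info info = LAGraph_grread (&A, &version, "graph.gr", GrB_INT32) ;
    if (info == GrB_SUCCESS)
    {
        GrB_Index n ;
        GrB_Matrix_nrows (&n, A) ;
        printf ("read %g-by-%g matrix, file version %g\n",
            (double) n, (double) n, (double) version) ;
    }
    GrB_free (&A) ;
    LAGraph_finalize ( ) ;
    return (info == GrB_SUCCESS) ? 0 : 1 ;
}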
explicit_residualbased_predictorcorrector_velocity_bossak_scheme_compressible.h
/* ============================================================================== KratosStructuralApplication A library based on: Kratos A General Purpose Software for Multi-Physics Finite Element Analysis Version 1.0 (Released on march 05, 2007). Copyright 2007 Pooyan Dadvand, Riccardo Rossi, Janosch Stascheit, Felix Nagel pooyan@cimne.upc.edu rrossi@cimne.upc.edu janosch.stascheit@rub.de nagel@sd.rub.de - CIMNE (International Center for Numerical Methods in Engineering), Gran Capita' s/n, 08034 Barcelona, Spain - Ruhr-University Bochum, Institute for Structural Mechanics, Germany Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following condition: Distribution of this code for any commercial purpose is permissible ONLY BY DIRECT ARRANGEMENT WITH THE COPYRIGHT OWNERS. The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ============================================================================== */ /* ********************************************************* * * Last Modified by: $Author: Kazem $ * Date: $Date: 2008-07-25 14:48:17 $ * Revision: $Revision: 1.1 $ * * ***********************************************************/ #if !defined(KRATOS_EXPLICIT_RESIDUALBASED_PREDICTOR_CORRECTOR_VELOCITY_BOSSAK_SCHEME_COMPRESSIBLE ) #define KRATOS_EXPLICIT_RESIDUALBASED_PREDICTOR_CORRECTOR_VELOCITY_BOSSAK_SCHEME_COMPRESSIBLE /* System includes */ /* External includes */ #include "boost/smart_ptr.hpp" /* Project includes */ #include "includes/define.h" #include "includes/model_part.h" #include "solving_strategies/schemes/scheme.h" #include "custom_strategies/strategies/residualbased_predictorcorrector_velocity_bossak_scheme_compressible.h" #include "includes/variables.h" #include "containers/array_1d.h" #include "utilities/openmp_utils.h" namespace Kratos { /* namespace VelocityBossakAuxiliaries { Matrix mMass; Matrix mDamp; Vector mvel; Vector macc; Vector maccold; } */ /**@name Kratos Globals */ /*@{ */ /*@} */ /**@name Type Definitions */ /*@{ */ /*@} */ /**@name Enum's */ /*@{ */ /*@} */ /**@name Functions */ /*@{ */ /*@} */ /**@name Kratos Classes */ /*@{ */ /** Short class definition. This class provides the implementation of the basic tasks that are needed by the solution strategy. It is intended to be the place for tailoring the solution strategies to problem specific tasks. Detail class definition. 
\URL[Example of use html]{ extended_documentation/no_ex_of_use.html} \URL[Example of use pdf]{ extended_documentation/no_ex_of_use.pdf} \URL[Example of use doc]{ extended_documentation/no_ex_of_use.doc} \URL[Example of use ps]{ extended_documentation/no_ex_of_use.ps} \URL[Extended documentation html]{ extended_documentation/no_ext_doc.html} \URL[Extended documentation pdf]{ extended_documentation/no_ext_doc.pdf} \URL[Extended documentation doc]{ extended_documentation/no_ext_doc.doc} \URL[Extended documentation ps]{ extended_documentation/no_ext_doc.ps} */ template<class TSparseSpace, class TDenseSpace //= DenseSpace<double> > class ExplicitResidualBasedPredictorCorrectorVelocityBossakSchemeCompressible : public ResidualBasedPredictorCorrectorVelocityBossakSchemeCompressible<TSparseSpace,TDenseSpace> { public: /**@name Type Definitions */ /*@{ */ KRATOS_CLASS_POINTER_DEFINITION( ExplicitResidualBasedPredictorCorrectorVelocityBossakSchemeCompressible); typedef Scheme<TSparseSpace,TDenseSpace> BaseType; typedef typename BaseType::TDataType TDataType; typedef typename BaseType::DofsArrayType DofsArrayType; typedef typename Element::DofsVectorType DofsVectorType; typedef typename BaseType::TSystemMatrixType TSystemMatrixType; typedef typename BaseType::TSystemVectorType TSystemVectorType; typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType; typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType; typedef ModelPart::ElementsContainerType ElementsArrayType; /*@} */ /**@name Life Cycle */ /*@{ */ /** Constructor. */ ExplicitResidualBasedPredictorCorrectorVelocityBossakSchemeCompressible(double NewAlphaBossak, double MoveMeshStrategy) :ResidualBasedPredictorCorrectorVelocityBossakSchemeCompressible<TSparseSpace,TDenseSpace>(NewAlphaBossak,MoveMeshStrategy) { //default values for the Newmark Scheme //mAlphaBossak = NewAlphaBossak; //mBetaNewmark = 0.25*pow((1.00-mAlphaBossak),2); mGamma = 0.5-NewAlphaBossak; //mMeshVelocity = MoveMeshStrategy; //mGammaNewmark = 1.0; //mBetaNewmark = 0.5; //sizing work matrices //mMass.resize(10,10); //mDamp.resize(10,10); //Allocate auxiliary memory int NumThreads = OpenMPUtils::GetNumThreads(); mMass.resize(NumThreads); mDamp.resize(NumThreads); mvel.resize(NumThreads); macc.resize(NumThreads); maccold.resize(NumThreads); std::cout << "using the ExplicitResidualBasedPredictorCorrectorVelocityBossakSchemeCompressible" << std::endl; } /** Destructor. */ virtual ~ExplicitResidualBasedPredictorCorrectorVelocityBossakSchemeCompressible() {} /*@} */ /**@name Operators */ /*@{ */ /** Performing the update of the solution. 
*/ //************************************************************************************************ //************************************************************************************************ void InitializeSolutionStep( ModelPart& r_model_part, TSystemMatrixType& A, TSystemVectorType& Dx, TSystemVectorType& b ) { //ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo(); Scheme<TSparseSpace, TDenseSpace>::InitializeSolutionStep(r_model_part, A, Dx, b); // double calc_dt = 1.0; double& DeltaTime = r_model_part.GetProcessInfo()[DELTA_TIME]; // for(typename ModelPart::ElementsContainerType::iterator elem = r_model_part.ElementsBegin(); elem != r_model_part.ElementsEnd(); elem++) // { // //calculate min_dt // elem->Calculate(DELTA_TIME, calc_dt, CurrentProcessInfo); // if(calc_dt < DeltaTime) // DeltaTime = 0.7*calc_dt; // // } // double DeltaTime = CurrentProcessInfo[DELTA_TIME]; if (DeltaTime == 0) KRATOS_THROW_ERROR(std::logic_error, "detected delta_time = 0 in the Bossak Scheme ... check if the time step is created correctly for the current model part", ""); //initializing constants (this)->ma0 = 1.0 / (mGamma * DeltaTime); (this)->ma1 = 0.0; (this)->ma2 = (-1 + mGamma) / mGamma; (this)->ma3 = DeltaTime; (this)->ma4 = pow(DeltaTime, 2)*0.5; (this)->ma5 = 0.0; (this)->mam = 1.0 / (mGamma * DeltaTime); } //*************************************************************************** //*************************************************************************** //predicts the solution at the current step as // v = vold virtual void Predict( ModelPart& r_model_part, DofsArrayType& rDofSet, TSystemMatrixType& A, TSystemVectorType& Dv, TSystemVectorType& b ) { KRATOS_TRY std::cout << "prediction" << std::endl; ModelPart::NodesContainerType::iterator it_begin = r_model_part.NodesBegin(); int n_nodes = r_model_part.Nodes().size(); #pragma omp parallel for firstprivate(n_nodes, it_begin) for( int kkk = 0; kkk < n_nodes; kkk++) { ModelPart::NodesContainerType::iterator itNode = it_begin+kkk; array_1d<double, 3 > & OldVelocity = (itNode)->FastGetSolutionStepValue(VELOCITY, 1); double& OldWaterPressure = (itNode)->FastGetSolutionStepValue(WATER_PRESSURE, 1); double& OldAirPressure = (itNode)->FastGetSolutionStepValue(AIR_PRESSURE, 1); //predicting velocity //ATTENTION::: the prediction is performed only on free nodes array_1d<double, 3 > & CurrentVelocity = (itNode)->FastGetSolutionStepValue(VELOCITY); double& CurrentWaterPressure = (itNode)->FastGetSolutionStepValue(WATER_PRESSURE); double& CurrentAirPressure = (itNode)->FastGetSolutionStepValue(AIR_PRESSURE); array_1d<double, 3 > & OldAcceleration = (itNode)->FastGetSolutionStepValue(ACCELERATION, 1); array_1d<double, 3 > & CurrentAcceleration = (itNode)->FastGetSolutionStepValue(ACCELERATION); double& CurrentWaterPressurerRate = (itNode)->FastGetSolutionStepValue(WATER_PRESSURE_DT,0); double& OldWaterPressurerRate = (itNode)->FastGetSolutionStepValue(WATER_PRESSURE_DT,1); double& CurrentAirPressurerRate = (itNode)->FastGetSolutionStepValue(AIR_PRESSURE_DT,0); double& OldAirPressurerRate = (itNode)->FastGetSolutionStepValue(AIR_PRESSURE_DT,1); if ((itNode->pGetDof(VELOCITY_X))->IsFree()) (CurrentAcceleration[0]) = OldAcceleration[0]; if (itNode->pGetDof(VELOCITY_Y)->IsFree()) (CurrentAcceleration[1]) = OldAcceleration[1]; if (itNode->HasDofFor(VELOCITY_Z)) if (itNode->pGetDof(VELOCITY_Z)->IsFree()) (CurrentAcceleration[2]) = OldAcceleration[2]; if (itNode->pGetDof(WATER_PRESSURE)->IsFree()) CurrentWaterPressurerRate = 
OldWaterPressurerRate;
                if (itNode->HasDofFor(AIR_PRESSURE))
                    if (itNode->pGetDof(AIR_PRESSURE)->IsFree())
                        CurrentAirPressurerRate = OldAirPressurerRate;

                // updating time derivatives ::: please note that displacements and
                // their time derivatives can not be consistently fixed separately
                UpdateVelocity(CurrentAcceleration,OldAcceleration,CurrentVelocity,OldVelocity);
                UpdatePressure(CurrentWaterPressurerRate, OldWaterPressurerRate, CurrentWaterPressure, OldWaterPressure);
                UpdatePressure(CurrentAirPressurerRate, OldAirPressurerRate, CurrentAirPressure, OldAirPressure);
                // UpdateDisplacement(CurrentDisplacement, OldDisplacement, OldVelocity, OldAcceleration, CurrentAcceleration);

                if ((this)->mMeshVelocity == 2) //Lagrangian
                {
                    array_1d<double, 3 > & OldDisplacement = (itNode)->FastGetSolutionStepValue(DISPLACEMENT, 1);
                    array_1d<double, 3 > & CurrentDisplacement = (itNode)->FastGetSolutionStepValue(DISPLACEMENT, 0);
                    noalias(itNode->FastGetSolutionStepValue(MESH_VELOCITY) ) = itNode->FastGetSolutionStepValue(VELOCITY);
                    (this)->UpdateDisplacement(CurrentDisplacement, OldDisplacement, OldVelocity, OldAcceleration, CurrentAcceleration);
                }
            }
            std::cout << "end of prediction" << std::endl;
            KRATOS_CATCH("")
        }

        //***************************************************************************
        //***************************************************************************
        virtual void Update(
            ModelPart& r_model_part,
            DofsArrayType& rDofSet,
            TSystemMatrixType& A,
            TSystemVectorType& Dv,
            TSystemVectorType& b
        )
        {
            KRATOS_TRY
            KRATOS_WATCH("inside update");
            ModelPart::NodesContainerType::iterator it_begin = r_model_part.NodesBegin();
            int n_nodes = r_model_part.Nodes().size();

            //dt factor
            // ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo();
            // double GammaNewmark = 0.5 - NewAlphaBossak;
            // double DeltaTime = CurrentProcessInfo[DELTA_TIME];
            //double time_fac = 1.0 / (mGamma * DeltaTime);

            #pragma omp parallel for firstprivate(n_nodes, it_begin)
            for( int kkk = 0; kkk < n_nodes; kkk++)
            {
                ModelPart::NodesContainerType::iterator ind = it_begin+kkk;

                //get Acceleration
                array_1d<double,3>& CurrentAcceleration= ind->FastGetSolutionStepValue(ACCELERATION);
                CurrentAcceleration = ZeroVector(3);

                double& CurrentWaterPressurerRate = ind->FastGetSolutionStepValue(WATER_PRESSURE_DT,0);
                CurrentWaterPressurerRate = 0.0;

                double& CurrentAirPressurerRate = ind->FastGetSolutionStepValue(AIR_PRESSURE_DT,0);
                CurrentAirPressurerRate = 0.0;

                //get mass
                double vel_mass = ind->FastGetSolutionStepValue(NODAL_MASS);
                double water_p_mass = ind->FastGetSolutionStepValue(NODAL_MAUX);//water
                double air_p_mass = ind->FastGetSolutionStepValue(NODAL_PAUX);//air

                //get RHS
                const array_1d<double,3> rhs_vel = ind->FastGetSolutionStepValue(RHS);//define template dim
                const double rhs_water_p = ind->FastGetSolutionStepValue(RHS_WATER);
                const double rhs_air_p = ind->FastGetSolutionStepValue(RHS_AIR);

                // vel_mass *= time_fac;
                if(vel_mass == 0.0 || (water_p_mass == 0.0 && air_p_mass==0.0))
                {
                    KRATOS_WATCH("ERROR: vel_mass == 0.0");
                    vel_mass = 100000000.0;
                }
                // KRATOS_WATCH(vel_mass);
                // KRATOS_WATCH(rhs_water_p);
                // KRATOS_WATCH(rhs_water_p/water_p_mass);

                //update Acceleration
                if( (ind->pGetDof(VELOCITY_X))->IsFixed() == false )
                    CurrentAcceleration[0] = rhs_vel[0]/vel_mass;
                if( (ind->pGetDof(VELOCITY_Y))->IsFixed() == false )
                    CurrentAcceleration[1] = rhs_vel[1]/vel_mass;
                if( ind->HasDofFor(VELOCITY_Z))
                    if( ind->pGetDof(VELOCITY_Z)->IsFixed() == false )
                        CurrentAcceleration[2] = rhs_vel[2]/vel_mass;

                //update Pressurerate
                if( (ind->pGetDof(WATER_PRESSURE))->IsFixed() == false )
                    if( water_p_mass > 0.0000000000001)
                        CurrentWaterPressurerRate = rhs_water_p/water_p_mass;

                if( (ind->pGetDof(AIR_PRESSURE))->IsFixed() == false )
                    if( air_p_mass > 0.0000000000001)
                        CurrentAirPressurerRate = rhs_air_p/air_p_mass;

                //update displacement and velocity
                array_1d<double,3> DeltaVel;
                //double DeltaWaterPressure = 0.0;
                //double DeltaAirPressure = 0.0;

                array_1d<double,3>& CurrentDisplacement = (ind)->FastGetSolutionStepValue(DISPLACEMENT,0);
                array_1d<double,3>& OldDisplacement = (ind)->FastGetSolutionStepValue(DISPLACEMENT,1);

                // array_1d<double,3>& CurrentAcceleration = (ind)->FastGetSolutionStepValue(ACCELERATION,0);
                array_1d<double,3>& OldAcceleration = (ind)->FastGetSolutionStepValue(ACCELERATION,1);

                // double& CurrentWaterPressurerRate = (ind)->FastGetSolutionStepValue(WATER_PRESSURE_DT,0);
                double& OldWaterPressurerRate = (ind)->FastGetSolutionStepValue(WATER_PRESSURE_DT,1);

                // double& CurrentAirPressurerRate = (ind)->FastGetSolutionStepValue(AIR_PRESSURE_DT,0);
                double& OldAirPressurerRate = (ind)->FastGetSolutionStepValue(AIR_PRESSURE_DT,1);

                array_1d<double,3>& CurrentVelocity = (ind)->FastGetSolutionStepValue(VELOCITY,0);
                array_1d<double,3>& OldVelocity = (ind)->FastGetSolutionStepValue(VELOCITY,1);

                double& CurrentWaterPressure = (ind)->FastGetSolutionStepValue(WATER_PRESSURE,0);
                double& OldWaterPressure = (ind)->FastGetSolutionStepValue(WATER_PRESSURE,1);

                double& CurrentAirPressure = (ind)->FastGetSolutionStepValue(AIR_PRESSURE,0);
                double& OldAirPressure = (ind)->FastGetSolutionStepValue(AIR_PRESSURE,1);

                UpdateVelocity(CurrentAcceleration,OldAcceleration,CurrentVelocity,OldVelocity);
                UpdatePressure(CurrentWaterPressurerRate, OldWaterPressurerRate, CurrentWaterPressure, OldWaterPressure);
                UpdatePressure(CurrentAirPressurerRate, OldAirPressurerRate, CurrentAirPressure, OldAirPressure);

                if(CurrentAirPressure < 160.0)
                {
                    CurrentAirPressure = 160.0;//considering min rho = .01
                }
                if(CurrentWaterPressure < 600000.0)//this is considering that min density of water is 997 (996.69 is w pressure zero)
                {
                    CurrentWaterPressure = 600000.0;//considering min rho = .01
                }

                //to not move nodes with fixed flag
                if(ind->IsFixed(DISPLACEMENT_X))
                    CurrentDisplacement[0] = 0.0;
                if(ind->IsFixed(DISPLACEMENT_Y))
                    CurrentDisplacement[1] = 0.0;
                if(ind->IsFixed(DISPLACEMENT_Z))
                    CurrentDisplacement[2] = 0.0;

                ind->FastGetSolutionStepValue(MESH_VELOCITY_X) = 0.0;
                ind->FastGetSolutionStepValue(MESH_VELOCITY_Y) = 0.0;
                ind->FastGetSolutionStepValue(MESH_VELOCITY_Z) = 0.0;

                if(this->mMeshVelocity == 0.0)//Eulerian
                {
                    ind->FastGetSolutionStepValue(MESH_VELOCITY_X) = 0.0;
                    ind->FastGetSolutionStepValue(MESH_VELOCITY_Y) = 0.0;
                    ind->FastGetSolutionStepValue(MESH_VELOCITY_Z) = 0.0;
                }
                if(this->mMeshVelocity == 1.0)
                {
                    ind->FastGetSolutionStepValue(MESH_VELOCITY_X) = ind->FastGetSolutionStepValue(VELOCITY_X,1);
                    ind->FastGetSolutionStepValue(MESH_VELOCITY_Y) = ind->FastGetSolutionStepValue(VELOCITY_Y,1);
                    ind->FastGetSolutionStepValue(MESH_VELOCITY_Z) = ind->FastGetSolutionStepValue(VELOCITY_Z,1);
                }
                if(this->mMeshVelocity == 2.0)//Lagrangian
                {
                    ind->FastGetSolutionStepValue(MESH_VELOCITY_X) = ind->FastGetSolutionStepValue(VELOCITY_X);
                    ind->FastGetSolutionStepValue(MESH_VELOCITY_Y) = ind->FastGetSolutionStepValue(VELOCITY_Y);
                    ind->FastGetSolutionStepValue(MESH_VELOCITY_Z) = ind->FastGetSolutionStepValue(VELOCITY_Z);
                    (this)->UpdateDisplacement(CurrentDisplacement,OldDisplacement,OldVelocity,OldAcceleration,CurrentAcceleration);
                }
            }
            KRATOS_WATCH("AFTER update vel and pr");

            // //updating time derivatives
            // for (it2=rDofSet.begin(); it2 != rDofSet.end(); ++it2)
            // {
            //// Dof::VariableType dof_variable = (*it2)->GetVariable();
            // // if ((*it2)->HasTimeDerivative())
            // mpModel->Value((*it2)->GetTimeDerivative(), *it2) = Dt(**it2, CurrentTime, DeltaTime);
            // // if ((*it2)->HasSecondTimeDerivative())
            // mpModel->Value((*it2)->GetSecondTimeDerivative(), *it2) = Dtt(**it2, CurrentTime, DeltaTime);
            // }

            KRATOS_CATCH("")
        }

        //******************************************************************************************
        //******************************************************************************************
        virtual void InitializeNonLinIteration(
            ModelPart& r_model_part,
            TSystemMatrixType& A,
            TSystemVectorType& Dx,
            TSystemVectorType& b)
        {
            KRATOS_TRY
            ModelPart::NodesContainerType::iterator it_begin = r_model_part.NodesBegin();
            int n_nodes = r_model_part.Nodes().size();

            //Tait-type water equation of state constants: bulk-modulus-like K1 (~2.07e9 Pa) and exponent K2
            double K1 = 2070000000;
            double K2 = 7.15;
            KRATOS_WATCH("inside initialize nonlinear iteration");

            #pragma omp parallel for firstprivate(n_nodes, it_begin)
            for( int kkk = 0; kkk < n_nodes; kkk++)
            {
                ModelPart::NodesContainerType::iterator ind = it_begin+kkk;

                ind->FastGetSolutionStepValue(NODAL_MASS) = 0.0;
                ind->FastGetSolutionStepValue(NODAL_MAUX) = 0.0;//use for water_pressure mass
                ind->FastGetSolutionStepValue(NODAL_PAUX) = 0.0;//use for air_pressure mass
                ind->FastGetSolutionStepValue(RHS) = ZeroVector(3);
                ind->FastGetSolutionStepValue(RHS_WATER) = 0.0;
                ind->FastGetSolutionStepValue(RHS_AIR) = 0.0;

                //loop over nodes to update density and sound velocity
                //*********update density DENSITY_AIR
                const double old_rho = ind->FastGetSolutionStepValue(DENSITY_AIR ,1);
                double pr = ind->FastGetSolutionStepValue(AIR_PRESSURE);
                double old_pr = ind->FastGetSolutionStepValue(AIR_PRESSURE,1);
                double alpha = 1.0;

                // UNCOMMENT FOR IMPLOSION
                if(pr < 160.0)
                {
                    //KRATOS_WATCH(pr);
                    ind->FastGetSolutionStepValue(AIR_PRESSURE) = 160.0;//considering min rho = .01
                    // pr = 160.0;
                    // KRATOS_WATCH("///////////////////////////// a reset air pressure////////////////////////");
                    // std::cout<< ind->Id()<<std::endl;
                }

                if(old_pr == 0.0 )
                    alpha = 1.0;
                else
                {
                    alpha = pow(pr/old_pr, 1.0/1.4);
                }
                ind->FastGetSolutionStepValue(DENSITY_AIR ) = old_rho*alpha;

                //*******update water density DENSITY
                const double old_rho_w = ind->FastGetSolutionStepValue(DENSITY_WATER ,1);
                double pr_w = ind->FastGetSolutionStepValue(WATER_PRESSURE);
                double old_pr_w = ind->FastGetSolutionStepValue(WATER_PRESSURE,1);

                // UNCOMMENT FOR IMPLOSION
                if(pr_w < 600000.0)
                {
                    // KRATOS_WATCH(pr_w);
                    ind->FastGetSolutionStepValue(WATER_PRESSURE) = 600000.0;//this is considering that min density of water is 997 (996.69 is w pressure zero)
                    // pr_w = 600000.0;
                    // KRATOS_WATCH("///////////////////////////// old WATER pressure reset////////////////////////");
                    // std::cout<< ind->Id()<<std::endl;
                }

                alpha = (pr_w + K1/K2)/(old_pr_w + K1/K2);
                ind->FastGetSolutionStepValue(DENSITY_WATER) = old_rho_w*pow(alpha,(1.0/K2));

                //update sound velocity
                ResidualBasedPredictorCorrectorVelocityBossakSchemeCompressible<TSparseSpace,TDenseSpace>::CalculateSoundVelocity(ind);
            }//end of loop over nodes

            //KRATOS_WATCH("inside initialize nonlinear iteration 000000000000000000000000000000000");
            //loop on nodes to compute ADVPROJ CONVPROJ NODALAREA
            //create a partition of the element array
            ModelPart::ElementsContainerType::iterator elem_bg = r_model_part.ElementsBegin();
            int n_elems = r_model_part.Elements().size();
            ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo();

            // assemble all elements
            //
            #pragma omp parallel for firstprivate(n_elems, elem_bg)
            for ( int jj=0; jj<n_elems; ++jj)
            {
                ModelPart::ElementsContainerType::iterator elem = elem_bg + jj;
                array_1d<double,3> mass_vec = ZeroVector(3);
                elem->Calculate(VELOCITY, mass_vec,CurrentProcessInfo);//write on air water and ebs_vel to calculate mass

                //add velocity mass
                double air_water = elem->GetValue(IS_WATER_ELEMENT);
                Element::GeometryType& geom = elem->GetGeometry();
                for (unsigned int i = 0; i <geom.size(); i++)
                {
                    //lock the node: several elements may update the same nodal
                    //value concurrently inside this parallel loop
                    geom[i].SetLock();
                    geom[i].FastGetSolutionStepValue(NODAL_MASS) += mass_vec[0];
                    if(air_water == 1.0)
                        geom[i].FastGetSolutionStepValue(NODAL_MAUX) += mass_vec[1];
                    if(air_water == 0.0)
                        geom[i].FastGetSolutionStepValue(NODAL_PAUX) += mass_vec[1];
                    geom[i].UnSetLock();
                }
            }
            //#pragma omp barrier

            KRATOS_WATCH("END OF INITIALIZE NonLinIteration");
            KRATOS_CATCH("")
        }

        //************************************************************************************************
        //************************************************************************************************

        //************************************************************************************************
        //************************************************************************************************
        void Calculate_RHS_Contribution(
            Element::Pointer rCurrentElement,
            LocalSystemVectorType& RHS_Contribution,
            Element::EquationIdVectorType& EquationId,
            ProcessInfo& CurrentProcessInfo)
        {
            KRATOS_TRY
            int k = OpenMPUtils::ThisThread();

            //Initializing the non linear iteration for the current element
            (rCurrentElement) -> InitializeNonLinearIteration(CurrentProcessInfo);

            //basic operations for the element considered
            (rCurrentElement)->CalculateRightHandSide(RHS_Contribution, CurrentProcessInfo);
            (rCurrentElement)->CalculateMassMatrix(mMass[k], CurrentProcessInfo);
            (rCurrentElement)->CalculateLocalVelocityContribution(mDamp[k], RHS_Contribution, CurrentProcessInfo);
            (rCurrentElement)->EquationIdVector(EquationId, CurrentProcessInfo);

            //adding the dynamic contributions (static is already included)
            AddDynamicsToRHS(rCurrentElement, RHS_Contribution, mDamp[k], mMass[k], CurrentProcessInfo);

            KRATOS_CATCH("")
        }

        //************************************************************************************************
        //************************************************************************************************
        void Condition_Calculate_RHS_Contribution(
            Condition::Pointer rCurrentCondition,
            LocalSystemVectorType& RHS_Contribution,
            Element::EquationIdVectorType& EquationId,
            ProcessInfo& CurrentProcessInfo)
        {
            KRATOS_TRY
            int k = OpenMPUtils::ThisThread();

            (rCurrentCondition) -> InitializeNonLinearIteration(CurrentProcessInfo);

            //basic operations for the condition considered
            (rCurrentCondition)->CalculateRightHandSide(RHS_Contribution, CurrentProcessInfo);
            (rCurrentCondition)->CalculateMassMatrix(mMass[k], CurrentProcessInfo);
            //(rCurrentCondition)->CalculateDampingMatrix(VelocityBossakAuxiliaries::mDamp,CurrentProcessInfo);
            (rCurrentCondition)->CalculateLocalVelocityContribution(mDamp[k], RHS_Contribution, CurrentProcessInfo);
            (rCurrentCondition)->EquationIdVector(EquationId, CurrentProcessInfo);

            //adding the dynamic contributions (static is already included)
            AddDynamicsToRHS(rCurrentCondition, RHS_Contribution, mDamp[k], mMass[k], CurrentProcessInfo);

            KRATOS_CATCH("")
        }

        //************************************************************************************************
        //**************************************************************************************

        /*@} */
/**@name Operations */ /*@{ */ /*@} */ /**@name Access */ /*@{ */ /*@} */ /**@name Inquiry */ /*@{ */ /*@} */ /**@name Friends */ /*@{ */ /*@} */ protected: /**@name Protected static Member Variables */ /*@{ */ //************************************************************************************************ //************************************************************************************************ void UpdateVelocity(const array_1d<double, 3 > & CurrentAcceleration, const array_1d<double, 3 > & OldAcceleration, array_1d<double, 3 > & CurrentVelocity,const array_1d<double, 3 > & OldVelocity) { noalias(CurrentVelocity) = OldVelocity + (CurrentAcceleration - (this)->ma2*OldAcceleration)/(this)->ma0; } void UpdatePressure(const double& CurrentPressureRate, const double& OldPressureRate, double& CurrentPressure,const double& OldPressure) { CurrentPressure = OldPressure + (CurrentPressureRate - (this)->ma2*OldPressureRate)/(this)->ma0; } //**************************************************************************** /** bdyn = b - D*vel */ void AddDynamicsToRHS( Element::Pointer rCurrentElement, LocalSystemVectorType& RHS_Contribution, LocalSystemMatrixType& D, LocalSystemMatrixType& M, ProcessInfo& CurrentProcessInfo) { KRATOS_TRY // KRATOS_WATCH(RHS_Contribution); // if (M.size1() != 0) { // rCurrentElement->GetSecondDerivativesVector(VelocityBossakAuxiliaries::macc, 0); // (VelocityBossakAuxiliaries::macc) *= (1.00 - mAlphaBossak); // rCurrentElement->GetSecondDerivativesVector(VelocityBossakAuxiliaries::maccold, 1); // noalias(VelocityBossakAuxiliaries::macc) += mAlphaBossak * VelocityBossakAuxiliaries::maccold; // noalias(RHS_Contribution) -= prod(M, VelocityBossakAuxiliaries::macc); // } // KRATOS_WATCH(RHS_Contribution); //adding damping contribution //damping contribution // if (D.size1() != 0) { // rCurrentElement->GetFirstDerivativesVector(VelocityBossakAuxiliaries::mvel, 0); // noalias(RHS_Contribution) -= prod(D, VelocityBossakAuxiliaries::mvel); // } // KRATOS_WATCH("Empty AddDynamicsToRHS ELEMENt"); KRATOS_CATCH("") } void AddDynamicsToRHS( Condition::Pointer rCurrentElement, LocalSystemVectorType& RHS_Contribution, LocalSystemMatrixType& D, LocalSystemMatrixType& M, ProcessInfo& CurrentProcessInfo) { //adding inertia contributionDISPLACEMENT // if (M.size1() != 0) { // rCurrentElement->GetSecondDerivativesVector(VelocityBossakAuxiliaries::macc, 0); // (VelocityBossakAuxiliaries::macc) *= (1.00 - mAlphaBossak); // rCurrentElement->GetSecondDerivativesVector(VelocityBossakAuxiliaries::maccold, 1); // noalias(VelocityBossakAuxiliaries::macc) += mAlphaBossak * VelocityBossakAuxiliaries::maccold; // // noalias(RHS_Contribution) -= prod(M, VelocityBossakAuxiliaries::macc); // } //adding damping contribution //damping contribution // if (D.size1() != 0) { // rCurrentElement->GetFirstDerivativesVector(VelocityBossakAuxiliaries::mvel, 0); // noalias(RHS_Contribution) -= prod(D, VelocityBossakAuxiliaries::mvel); // } // KRATOS_WATCH("Empty AddDynamicsToRHS CONDITION"); } /*@} */ /**@name Protected Operations*/ /*@{ */ /*@} */ /**@name Protected Access */ /*@{ */ /*@} */ /**@name Protected Inquiry */ /*@{ */ /*@} */ /**@name Protected LifeCycle */ /*@{ */ /*@} */ private: /**@name Static Member Variables */ /*@{ */ /*@} */ /**@name Member Variables */ /*@{ */ std::vector< Matrix >mMass; std::vector< Matrix >mDamp; std::vector< Vector >mvel; std::vector< Vector >macc; std::vector< Vector >maccold; double mGamma; /*@} */ /**@name Private Operators*/ /*@{ */ /*@} */ /**@name Private 
Operations*/
    /*@{ */

    /*@} */
    /**@name Private Access */
    /*@{ */

    /*@} */
    /**@name Private Inquiry */
    /*@{ */

    /*@} */
    /**@name Un accessible methods */
    /*@{ */

    /*@} */

}; /* Class Scheme */

/*@} */

/**@name Type Definitions */
/*@{ */

/*@} */

} /* namespace Kratos.*/

#endif /* KRATOS_EXPLICIT_RESIDUALBASED_PREDICTOR_CORRECTOR_VELOCITY_BOSSAK_SCHEME_COMPRESSIBLE defined */
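A quick standalone check (an illustrative sketch, not part of the Kratos sources) that the constants set in InitializeSolutionStep, ma0 = 1/(mGamma*dt) and ma2 = (mGamma-1)/mGamma, make UpdateVelocity and UpdatePressure reproduce the standard Newmark update v = v_old + dt*((1-gamma)*a_old + gamma*a_new), with mGamma = 0.5 - alpha_bossak as in the constructor:

#include <cassert>
#include <cmath>

int main()
{
    const double alpha_bossak = -0.3;             // example value
    const double dt = 0.01;
    const double gamma = 0.5 - alpha_bossak;      // mGamma in the scheme
    const double ma0 = 1.0 / (gamma * dt);
    const double ma2 = (-1.0 + gamma) / gamma;
    const double a_old = 2.0, a_new = 3.5, v_old = 1.0;

    // UpdateVelocity computes: v = v_old + (a_new - ma2 * a_old) / ma0
    const double v_scheme = v_old + (a_new - ma2 * a_old) / ma0;
    // expanded form: v = v_old + dt * ((1 - gamma) * a_old + gamma * a_new)
    const double v_newmark = v_old + dt * ((1.0 - gamma) * a_old + gamma * a_new);

    assert(std::fabs(v_scheme - v_newmark) < 1e-14);
    return 0;
}

The same algebra applies to UpdatePressure, with the pressure rate playing the role of the acceleration.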
cell_division_gpu.h
// -----------------------------------------------------------------------------
//
// Copyright (C) The BioDynaMo Project.
// All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
//
// See the LICENSE file distributed with this work for details.
// See the NOTICE file distributed with this work for additional information
// regarding copyright ownership.
//
// -----------------------------------------------------------------------------

#ifndef SYSTEM_CELL_DIVISION_GPU_SRC_CELL_DIVISION_GPU_H_
#define SYSTEM_CELL_DIVISION_GPU_SRC_CELL_DIVISION_GPU_H_

#include <array>
#include <cmath>
#include <iostream>

#include "biodynamo.h"
#include "core/util/math.h"

namespace bdm {

// ----------------------------------------------------------------------------
// Starting with 8 cells, we let each cell grow in volume up to the point where
// a cell must divide. This tests whether the GPU accelerated mechanical
// interactions properly handle the creation of new cells.
// -----------------------------------------------------------------------------

inline void ExpectArrayNear(const Double3& actual, const Double3& expected,
                            bool* ret) {
  for (size_t i = 0; i < actual.size(); i++) {
    if (std::fabs(expected[i] - actual[i]) > 1e-9) {
      *ret = false;
      std::cout << "Wrong result! Expected " << expected[i]
                << ", but instead got " << actual[i]
                << ", which is a difference of "
                << std::fabs(expected[i] - actual[i])
                << ", which is larger than 1e-9" << std::endl;
    }
  }
}

enum ExecutionMode { kCpu, kCuda, kOpenCl };

inline void RunTest(bool* result, ExecutionMode mode) {
  auto set_param = [&](auto* param) {
    switch (mode) {
      case kCpu:
        break;
      case kOpenCl:
        param->use_opencl_ = true;
        // fall through: the OpenCL backend also requires use_gpu_
      case kCuda:
        param->use_gpu_ = true;
    }
  };
  Simulation simulation("cell_division_gpu", set_param);
  auto* rm = simulation.GetResourceManager();
  auto* param = simulation.GetParam();
  rm->Clear();

  // Every test must use the same RNG seed, because random numbers are used
  // during cell division and would otherwise differ between runs within a
  // single executable. The seed also has to be set in every thread.
#pragma omp parallel
  simulation.GetRandom()->SetSeed(1);

  size_t cells_per_dim = 2;
  auto construct = [](const Double3& position) {
    auto* cell = new Cell(position);
    cell->SetDiameter(30);
    cell->SetAdherence(0.4);
    cell->SetMass(1.0);
    cell->AddBiologyModule(new GrowDivide(30.05, 5000, {gAllEventIds}));
    return cell;
  };

  for (size_t x = 0; x < cells_per_dim; x++) {
    double x_pos = x * 20.0;
    for (size_t y = 0; y < cells_per_dim; y++) {
      double y_pos = y * 20.0;
      for (size_t z = 0; z < cells_per_dim; z++) {
        auto new_simulation_object = construct({x_pos, y_pos, z * 20.0});
        rm->push_back(new_simulation_object);
      }
    }
  }

  // Run for 10 timesteps. In step 2 a division should take place.
In step 3 // these new cells are instantiated simulation.GetScheduler()->Simulate(10); ExpectArrayNear( rm->GetSimObject(0)->GetPosition(), {4.1399071506916413909, -5.9871942139195297727, 2.8344890446256703065}, result); ExpectArrayNear( rm->GetSimObject(1)->GetPosition(), {-2.4263219149482031511, -1.4202336557809887019, 29.769029317615839147}, result); ExpectArrayNear( rm->GetSimObject(2)->GetPosition(), {-4.9118212650644856865, 23.156656083480623209, -9.1231684411316447125}, result); ExpectArrayNear( rm->GetSimObject(3)->GetPosition(), {4.3076765979041251597, 15.615300607043293368, 25.657658447555828474}, result); ExpectArrayNear( rm->GetSimObject(4)->GetPosition(), {28.139314619772036963, -0.20987998233654170388, 4.6381417441282613012}, result); ExpectArrayNear( rm->GetSimObject(5)->GetPosition(), {24.417550786690171094, 3.347525366344008102, 28.067824703341415216}, result); ExpectArrayNear( rm->GetSimObject(6)->GetPosition(), {16.614520566718258721, 15.828015607618416638, -4.8357284569095106974}, result); ExpectArrayNear( rm->GetSimObject(7)->GetPosition(), {14.446017269290647889, 22.250832446808978204, 20.180438615017894932}, result); } inline int Simulate(int argc, const char** argv) { bool result = true; // TODO(ahmad): after Trello card ("Fix inconsistency in cell state due to // direct updates in Biology Modules") // enable multithreading, and adjust results if necessary omp_set_num_threads(1); // Run CPU version RunTest(&result, kCpu); // Run GPU (CUDA) version RunTest(&result, kCuda); // Run GPU (OpenCL) version RunTest(&result, kOpenCl); return !result; } } // namespace bdm #endif // SYSTEM_CELL_DIVISION_GPU_SRC_CELL_DIVISION_GPU_H_
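A minimal companion translation unit (a hypothetical demo-style driver, not shipped with this header) that wires up the entry point the way BioDynaMo demos conventionally do:

// cell_division_gpu.cc (hypothetical driver)
#include "cell_division_gpu.h"

int main(int argc, const char** argv) {
  // bdm::Simulate returns 0 only if every position check passed
  return bdm::Simulate(argc, argv);
}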
xpose.c
#include "../bib.h" #include "../types.h" #include "../cdefs.h" #include "safeomp.h" #define BLOCKSIZE 8 // TODO check cache line explicitly int bib_xpose(cmat_r x, mat_r tx) { CHECKIFSAME(x, tx); const len_t m = x->nrows; const len_t n = x->ncols; const double *const x_data = x->data; double *const tx_data = tx->data; if (m != tx->ncols || n != tx->nrows) return LIBBIB_INDIMMISMATCH; #pragma omp parallel for default(none) schedule(dynamic, 1) if(n>OMP_MIN_SIZE) for (len_t j=0; j<n; j+=BLOCKSIZE) { for (len_t i=0; i<m; i+=BLOCKSIZE) { for (len_t col=j; col<j+BLOCKSIZE && col<n; ++col) { for (len_t row=i; row<i+BLOCKSIZE && row<m; ++row) tx_data[col + n*row] = x_data[row + m*col]; } } } return LIBBIB_OK; } int bib_xpose_a(cmat_r x, dmatrix_t **restrict tx) { CHECKIFSAME(x, *tx); *tx = newmat(NCOLS(x), NROWS(x)); if (DATA(*tx) == NULL) return LIBBIB_BADMALLOC; return bib_xpose(x, *tx); }
kmeans.c
/** @file kmeans.c
 ** @brief K-means - Declaration
 ** @author Andrea Vedaldi, David Novotny
 **/

/*
Copyright (C) 2007-12 Andrea Vedaldi and Brian Fulkerson.
Copyright (C) 2013 Andrea Vedaldi and David Novotny.
All rights reserved.

This file is part of the VLFeat library and is made available under
the terms of the BSD license (see the COPYING file).
*/

/**
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
@page kmeans K-means clustering
@author Andrea Vedaldi
@author David Novotny
@tableofcontents
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->

@ref kmeans.h implements a number of algorithms for **K-means
quantization**: Lloyd @cite{lloyd82least}, an accelerated version by
Elkan @cite{elkan03using}, and a large scale algorithm based on
Approximate Nearest Neighbors (ANN). All algorithms support @c float
or @c double data and can use the $l^1$ or the $l^2$ distance for
clustering. Furthermore, all algorithms can take advantage of multiple
CPU cores.

Please see @subpage kmeans-fundamentals for a technical description of
K-means and of the algorithms implemented here.

<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
@section kmeans-starting Getting started
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->

The goal of K-means is to partition a dataset into $K$
&ldquo;compact&rdquo; clusters. The following example demonstrates
using @ref kmeans.h in the C programming language to partition @c
numData @c float vectors into @c numCenters clusters using Lloyd's
algorithm:

@code
#include <vl/kmeans.h>
double energy ;
double * centers ;
// Use float data and the L2 distance for clustering
VlKMeans * kmeans = vl_kmeans_new (VL_TYPE_FLOAT, VlDistanceL2) ;
// Use Lloyd algorithm
vl_kmeans_set_algorithm (kmeans, VlKMeansLloyd) ;
// Initialize the cluster centers by randomly sampling the data
vl_kmeans_init_centers_with_rand_data (kmeans, data, dimension, numData, numCenters) ;
// Run at most 100 iterations of cluster refinement using Lloyd algorithm
vl_kmeans_set_max_num_iterations (kmeans, 100) ;
vl_kmeans_refine_centers (kmeans, data, numData) ;
// Obtain the energy of the solution
energy = vl_kmeans_get_energy(kmeans) ;
// Obtain the cluster centers
centers = vl_kmeans_get_centers(kmeans) ;
@endcode

Once the centers have been obtained, new data points can be assigned
to clusters by using the ::vl_kmeans_quantize function:

@code
vl_uint32 * assignments = vl_malloc(sizeof(vl_uint32) * numData) ;
float * distances = vl_malloc(sizeof(float) * numData) ;
vl_kmeans_quantize(kmeans, assignments, distances, data, numData) ;
@endcode

Alternatively, one can directly assign new data points to the closest
centers, without bothering with a ::VlKMeans object.

There are several considerations that may impact the performance of
KMeans. First, since K-means is usually based on a local optimization
algorithm, the **initialization method** is important. The following
initialization methods are supported:

Method         | Function                                 | Description
---------------|------------------------------------------|-----------------------------------------------
Random samples | ::vl_kmeans_init_centers_with_rand_data  | Random data points
K-means++      | ::vl_kmeans_init_centers_plus_plus       | Random selection biased towards diversity
Custom         | ::vl_kmeans_set_centers                  | Choose centers (useful to run quantization only)

See @ref kmeans-init for further details.
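For example, seeding the centers with K-means++ instead of random
samples only changes the initialization call; a sketch reusing the
variables of the example above:

@code
// bias the random selection of initial centers towards diversity
vl_kmeans_init_centers_plus_plus (kmeans, data, dimension, numData, numCenters) ;
@endcode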
The initialization methods use a randomized selection of the data
points; the state of the random number generator is initialized with
::vl_rand_init.

The second important choice is the **optimization algorithm**. The
following optimization algorithms are supported:

Algorithm   | Symbol           | See               | Description
------------|------------------|-------------------|-----------------------------------------------
Lloyd       | ::VlKMeansLloyd  | @ref kmeans-lloyd | Alternate EM-style optimization
Elkan       | ::VlKMeansElkan  | @ref kmeans-elkan | A speedup using triangular inequalities
ANN         | ::VlKMeansANN    | @ref kmeans-ann   | A speedup using approximated nearest neighbors

See the relative sections for further details. These algorithms are
iterative, and stop when either a **maximum number of iterations**
(::vl_kmeans_set_max_num_iterations) is reached, or when the energy
changes sufficiently slowly in one iteration
(::vl_kmeans_set_min_energy_variation).

All three algorithms support multithreaded computation. The number of
threads used is usually controlled globally by ::vl_set_num_threads.
**/

/**
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
@page kmeans-fundamentals K-means fundamentals
@tableofcontents
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->

Given $n$ points $\bx_1,\dots,\bx_n \in \real^d$, the goal of K-means
is to find $K$ `centers` $\bc_1,\dots,\bc_K \in \real^d$ and
`assignments` $q_1,\dots,q_n \in \{1,\dots,K\}$ of the points to the
centers such that the sum of distances

\[
 E(\bc_1,\dots,\bc_K,q_1,\dots,q_n) = \sum_{i=1}^n \|\bx_i - \bc_{q_i} \|_p^p
\]

is minimized. $K$-means is obtained for the case $p=2$ ($l^2$ norm),
because in this case the optimal centers are the means of the input
vectors assigned to them. Here the generalization $p=1$ ($l^1$ norm)
will also be considered.

Up to normalization, the K-means objective $E$ is also the average
reconstruction error if the original points are approximated with the
cluster centers. Thus K-means is used not only to group the input
points into clusters, but also to `quantize` their values.

K-means is widely used in computer vision, for example in the
construction of vocabularies of visual features (visual words). In
these applications the number $n$ of points to cluster and/or the
number $K$ of clusters is often large. Unfortunately, minimizing the
objective $E$ is in general a difficult combinatorial problem, so
locally optimal or approximated solutions are sought instead.

The basic K-means algorithm alternates between re-estimating the
centers and the assignments (@ref kmeans-lloyd). Combined with a good
initialization strategy (@ref kmeans-init) and, potentially, by
re-running the optimization from a number of randomized starting
states, this algorithm may attain satisfactory solutions in practice.

However, despite its simplicity, Lloyd's algorithm is often too slow.
A good replacement is Elkan's algorithm (@ref kmeans-elkan), which
uses the triangular inequality to significantly cut down the cost of
Lloyd's algorithm. Since this algorithm is otherwise equivalent, it
should often be preferred.

For very large problems (millions of points to cluster and hundreds,
thousands, or more clusters to find), even Elkan's algorithm is not
sufficiently fast. In these cases, one can resort to a variant of
Lloyd's algorithm that uses an approximated nearest neighbors routine
(@ref kmeans-ann).
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
@section kmeans-init Initialization methods
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->

All the $K$-means algorithms considered here find locally optimal
solutions; as such the way they are initialized is important. @ref
kmeans.h supports the following initialization algorithms:

@par Random data samples

The simplest initialization method is to sample $K$ points at random
from the input data and use them as initial values for the cluster
centers.

@par K-means++

@cite{arthur07k-means} proposes a randomized initialization of the
centers which improves upon random selection. The first center $\bc_1$
is selected at random from the data points $\bx_1, \dots, \bx_n $ and
the distance from this center to all points $\|\bx_i - \bc_1\|_p^p$ is
computed. Then the second center $\bc_2$ is selected at random from
the data points with probability proportional to the distance. The
procedure is repeated to obtain the other centers by using the minimum
distance to the centers collected so far.

<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
@section kmeans-lloyd Lloyd's algorithm
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->

The most common K-means method is Lloyd's algorithm
@cite{lloyd82least}. This algorithm is based on the observation that,
while jointly optimizing clusters and assignments is difficult,
optimizing one given the other is easy. Lloyd's algorithm alternates
the steps:

1. **Quantization.** Each point $\bx_i$ is reassigned to the center
$\bc_{q_i}$ closest to it. This requires finding for each point the
closest among $K$ other points, which is potentially slow.
2. **Center estimation.** Each center $\bc_q$ is updated to minimize
its average distance to the points assigned to it. It is easy to show
that the best center is the mean or median of the points, respectively
if the $l^2$ or $l^1$ norm is considered.

A naive implementation of the assignment step requires $O(dnK)$
operations, where $d$ is the dimensionality of the data, $n$ the
number of data points, and $K$ the number of centers. Updating the
centers is much cheaper: $O(dn)$ operations suffice to compute the $K$
means and a slightly higher cost is required for the medians. Clearly,
the bottleneck is the assignment computation, and this is what the
other K-means algorithms try to improve.

During the iterations, it can happen that a cluster becomes empty. In
this case, K-means automatically **&ldquo;restarts&rdquo; the
cluster** center by selecting a training point at random.

<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
@section kmeans-elkan Elkan's algorithm
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->

Elkan's algorithm @cite{elkan03using} is a variation of Lloyd's
alternating optimization algorithm (@ref kmeans-lloyd) that uses the
triangular inequality to avoid many distance calculations when
assigning points to clusters. While much faster than Lloyd, Elkan's
method uses storage proportional to the number of clusters times the
number of data points, which makes it impractical for a very large
number of clusters.

The idea of this algorithm is that, if a center update does not move
the centers much, then most of the point-to-center computations can be
avoided when the point-to-center assignments are recomputed. To detect
which distances need evaluation, the triangular inequality is used to
lower and upper bound distances after a center update.
Elkan's algorithm uses two key observations. First, one has

\[
\|\bx_i - \bc_{q_i}\|_p \leq \|\bc - \bc_{q_i}\|_p / 2
\quad\Rightarrow\quad
\|\bx_i - \bc_{q_i}\|_p \leq \|\bx_i - \bc\|_p.
\]

Thus if the distance between $\bx_i$ and its current center
$\bc_{q_i}$ is less than half the distance of the center $\bc_{q_i}$
to another center $\bc$, then $\bc$ can be skipped when the new
assignment for $\bx_i$ is searched. Checking this requires keeping
track of all the inter-center distances, but centers are typically a
small fraction of the training data, so overall this can be a
significant saving. In particular, if this condition is satisfied for
all the centers $\bc \not= \bc_{q_i}$, the point $\bx_i$ can be
skipped completely. Furthermore, the condition can also be tested
based on an upper bound $UB_i$ of $\|\bx_i - \bc_{q_i}\|_p$.

Second, if a center $\bc$ is updated to $\hat{\bc}$, then the new
distance from $\bx$ to $\hat{\bc}$ is bounded from below and above by

\[
\|\bx - \bc\|_p - \|\bc - \hat{\bc}\|_p
\leq
\|\bx - \hat{\bc}\|_p
\leq
\|\bx - \bc\|_p + \|\bc - \hat{\bc}\|_p.
\]

This allows to maintain an upper bound on the distance of $\bx_i$ to
its current center $\bc_{q_i}$ and a lower bound to any other center
$\bc$:

@f{align*}
UB_i & \leftarrow UB_i + \|\bc_{q_i} - \hat{\bc}_{q_i} \|_p \\
LB_i(\bc) & \leftarrow LB_i(\bc) - \|\bc -\hat \bc\|_p.
@f}

Thus the K-means algorithm becomes:

1. **Initialization.** Compute $LB_i(\bc) = \|\bx_i - \bc\|_p$ for all
points and centers. Find the current assignments $q_i$ and bounds
$UB_i$ by finding the closest centers to each point: $UB_i =
\min_{\bc} LB_i(\bc)$.
2. **Center estimation.**
  1. Recompute all the centers based on the new means; call the
  updated version $\hat{\bc}$.
  2. Update all the bounds based on the distance $\|\bc - \hat\bc\|_p$
  as explained above.
  3. Set $\bc \leftarrow \hat\bc$ for all the centers and go to the
  next iteration.
3. **Quantization.**
  1. Skip any point $\bx_i$ such that $UB_i \leq \frac{1}{2}
  \|\bc_{q_i} - \bc\|_p$ for all centers $\bc \not= \bc_{q_i}$.
  2. For each remaining point $\bx_i$ and center $\bc \not=
  \bc_{q_i}$:
    1. Skip $\bc$ if
       \[
       UB_i \leq \frac{1}{2} \| \bc_{q_i} - \bc \|_p
       \quad\text{or}\quad
       UB_i \leq LB_i(\bc).
       \]
       The first condition reflects the first observation above; the
       second uses the bounds to decide if $\bc$ can be closer than
       the current center $\bc_{q_i}$ to the point $\bx_i$. If the
       center cannot be skipped, continue as follows.
    2. Skip $\bc$ if the condition above is satisfied after making the
       upper bound tight:
       \[
       UB_i = LB_i(\bc_{q_i}) = \| \bx_i - \bc_{q_i} \|_p.
       \]
       Note that the latter calculation can be done only once for
       $\bx_i$. If the center cannot be skipped still, continue as
       follows.
    3. Tighten the lower bound too:
       \[
       LB_i(\bc) = \| \bx_i - \bc \|_p.
       \]
       At this point both $UB_i$ and $LB_i(\bc)$ are tight. If
       $LB_i(\bc) < UB_i$, then the point $\bx_i$ should be reassigned
       to $\bc$. Update $q_i$ to the index of center $\bc$ and reset
       $UB_i = LB_i(\bc)$.

<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
@section kmeans-ann ANN algorithm
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->

The *Approximate Nearest Neighbor* (ANN) K-means algorithm
@cite{beis97shape} @cite{silpa-anan08optimised} @cite{muja09fast} is a
variant of Lloyd's algorithm (@ref kmeans-lloyd) that uses a
best-bin-first randomized KD-tree algorithm to approximately (and
quickly) find the closest cluster center to each point. The KD-tree
implementation is based on @ref kdtree.
The algorithm can be summarized as follows:

1. **Quantization.** Each point $\bx_i$ is reassigned to the center
$\bc_{q_i}$ closest to it. This starts by indexing the $K$ centers by
a KD-tree and then using the latter to quickly find the closest center
for every training point. The search is approximated to further
improve speed. This opens up the possibility that a data point may
receive an assignment that is *worse* than the current one. This is
avoided by checking that the new assignment estimated by using ANN is
an improvement; otherwise the old assignment is kept.
2. **Center estimation.** Each center $\bc_q$ is updated to minimize
its average distance to the points assigned to it. It is easy to show
that the best center is the mean or median of the points, respectively
if the $l^2$ or $l^1$ norm is considered.

The key is to carefully trade off the speedup obtained by using the
ANN algorithm against the loss in accuracy when retrieving neighbors.
Due to the curse of dimensionality, KD-trees become less effective for
higher dimensional data, so that the search cost, which in the best
case is logarithmic with this data structure, may become effectively
linear. This is somewhat mitigated by the fact that a new KD-tree is
computed at each iteration, reducing the likelihood that points may
get stuck with sub-optimal assignments.

Experiments with the quantization of 128-dimensional SIFT features
show that the ANN algorithm may use one quarter of the comparisons of
Elkan's while retaining a similar solution accuracy.
*/

#include "kmeans.h"
#include "generic.h"
#include "mathop.h"
#include <string.h>

#ifdef _OPENMP
#include <omp.h>
#endif

/* ================================================================ */
#ifndef VL_KMEANS_INSTANTIATING

/** ------------------------------------------------------------------
 ** @brief Reset state
 **
 ** The function resets the state of the KMeans object. It deletes
 ** any stored centers, releasing the corresponding memory. This
 ** cancels the effect of seeding or setting the centers, but
 ** does not change the other configuration parameters.
 **/

void vl_kmeans_reset (VlKMeans * self)
{
  self->numCenters = 0 ;
  self->dimension = 0 ;

  if (self->centers) vl_free(self->centers) ;
  if (self->centerDistances) vl_free(self->centerDistances) ;

  self->centers = NULL ;
  self->centerDistances = NULL ;
}

/** ------------------------------------------------------------------
 ** @brief Create a new KMeans object
 ** @param dataType type of data (::VL_TYPE_FLOAT or ::VL_TYPE_DOUBLE)
 ** @param distance distance.
 ** @return new KMeans object instance.
 **/

VlKMeans * vl_kmeans_new (vl_type dataType, VlVectorComparisonType distance)
{
  VlKMeans * self = vl_calloc(1, sizeof(VlKMeans)) ;

  self->algorithm = VlKMeansLloyd ;
  self->distance = distance ;
  self->dataType = dataType ;
  self->verbosity = 0 ;
  self->maxNumIterations = 100 ;
  self->minEnergyVariation = 1e-4 ;
  self->numRepetitions = 1 ;
  self->centers = NULL ;
  self->centerDistances = NULL ;
  self->numTrees = 3;
  self->maxNumComparisons = 100;

  vl_kmeans_reset (self) ;
  return self ;
}

/** ------------------------------------------------------------------
 ** @brief Create a new KMeans object by copy
 ** @param kmeans KMeans object to copy.
 ** @return new copy.
 **/

VlKMeans * vl_kmeans_new_copy (VlKMeans const * kmeans)
{
  VlKMeans * self = vl_malloc(sizeof(VlKMeans)) ;

  self->algorithm = kmeans->algorithm ;
  self->distance = kmeans->distance ;
  self->dataType = kmeans->dataType ;

  self->verbosity = kmeans->verbosity ;
  self->maxNumIterations = kmeans->maxNumIterations ;
  /* self is allocated with vl_malloc, so every field must be copied
     explicitly; minEnergyVariation would otherwise be uninitialized */
  self->minEnergyVariation = kmeans->minEnergyVariation ;
  self->numRepetitions = kmeans->numRepetitions ;

  self->dimension = kmeans->dimension ;
  self->numCenters = kmeans->numCenters ;
  self->centers = NULL ;
  self->centerDistances = NULL ;

  self->numTrees = kmeans->numTrees;
  self->maxNumComparisons = kmeans->maxNumComparisons;

  if (kmeans->centers) {
    vl_size dataSize = vl_get_type_size(self->dataType) * self->dimension * self->numCenters ;
    self->centers = vl_malloc(dataSize) ;
    memcpy (self->centers, kmeans->centers, dataSize) ;
  }

  if (kmeans->centerDistances) {
    vl_size dataSize = vl_get_type_size(self->dataType) * self->numCenters * self->numCenters ;
    self->centerDistances = vl_malloc(dataSize) ;
    memcpy (self->centerDistances, kmeans->centerDistances, dataSize) ;
  }

  return self ;
}

/** ------------------------------------------------------------------
 ** @brief Deletes a KMeans object
 ** @param self KMeans object instance.
 **
 ** The function deletes the KMeans object instance created
 ** by ::vl_kmeans_new.
 **/

void vl_kmeans_delete (VlKMeans * self)
{
  vl_kmeans_reset (self) ;
  vl_free (self) ;
}

/* a helper structure */
typedef struct _VlKMeansSortWrapper {
  vl_uint32 * permutation ;
  void const * data ;
  vl_size stride ;
} VlKMeansSortWrapper ;

/* ---------------------------------------------------------------- */
/* Instantiate shuffle algorithm */

#define VL_SHUFFLE_type vl_uindex
#define VL_SHUFFLE_prefix _vl_kmeans
#include "shuffle-def.h"

/* #ifdef VL_KMEANS_INSTANTIATING */
#endif

/* ================================================================ */
#ifdef VL_KMEANS_INSTANTIATING

/* ---------------------------------------------------------------- */
/*                                                      Set centers */
/* ---------------------------------------------------------------- */

static void
VL_XCAT(_vl_kmeans_set_centers_, SFX)
(VlKMeans * self, TYPE const * centers, vl_size dimension, vl_size numCenters)
{
  self->dimension = dimension ;
  self->numCenters = numCenters ;
  self->centers = vl_malloc (sizeof(TYPE) * dimension * numCenters) ;
  memcpy ((TYPE*)self->centers, centers, sizeof(TYPE) * dimension * numCenters) ;
}

/* ---------------------------------------------------------------- */
/*                                                   Random seeding */
/* ---------------------------------------------------------------- */

static void
VL_XCAT(_vl_kmeans_init_centers_with_rand_data_, SFX)
(VlKMeans * self, TYPE const * data, vl_size dimension, vl_size numData, vl_size numCenters)
{
  vl_uindex i, j, k ;
  VlRand * rand = vl_get_rand () ;

  self->dimension = dimension ;
  self->numCenters = numCenters ;
  self->centers = vl_malloc (sizeof(TYPE) * dimension * numCenters) ;

  {
    vl_uindex * perm = vl_malloc (sizeof(vl_uindex) * numData) ;
#if (FLT == VL_TYPE_FLOAT)
    VlFloatVectorComparisonFunction distFn = vl_get_vector_comparison_function_f(self->distance) ;
#else
    VlDoubleVectorComparisonFunction distFn = vl_get_vector_comparison_function_d(self->distance) ;
#endif
    TYPE * distances = vl_malloc (sizeof(TYPE) * numCenters) ;

    /* get a random permutation of the data points */
    for (i = 0 ; i < numData ; ++i) perm[i] = i ;
    _vl_kmeans_shuffle (perm, numData, rand) ;

    for (k = 0, i = 0 ; k < numCenters ; ++ i) {
      /* compare the next data point to all centers collected so far
         to detect duplicates (if there are enough left) */
      if (numCenters - k < numData - i) {
vl_bool duplicateDetected = VL_FALSE ; VL_XCAT(vl_eval_vector_comparison_on_all_pairs_, SFX)(distances, dimension, data + dimension * perm[i], 1, (TYPE*)self->centers, k, distFn) ; for (j = 0 ; j < k ; ++j) { duplicateDetected |= (distances[j] == 0) ; } if (duplicateDetected) continue ; } /* ok, it is not a duplicate so we can accept it! */ memcpy ((TYPE*)self->centers + dimension * k, data + dimension * perm[i], sizeof(TYPE) * dimension) ; k ++ ; } vl_free(distances) ; vl_free(perm) ; } } /* ---------------------------------------------------------------- */ /* kmeans++ seeding */ /* ---------------------------------------------------------------- */ static void VL_XCAT(_vl_kmeans_init_centers_plus_plus_, SFX) (VlKMeans * self, TYPE const * data, vl_size dimension, vl_size numData, vl_size numCenters) { vl_uindex x, c ; VlRand * rand = vl_get_rand () ; TYPE * distances = vl_malloc (sizeof(TYPE) * numData) ; TYPE * minDistances = vl_malloc (sizeof(TYPE) * numData) ; #if (FLT == VL_TYPE_FLOAT) VlFloatVectorComparisonFunction distFn = vl_get_vector_comparison_function_f(self->distance) ; #else VlDoubleVectorComparisonFunction distFn = vl_get_vector_comparison_function_d(self->distance) ; #endif self->dimension = dimension ; self->numCenters = numCenters ; self->centers = vl_malloc (sizeof(TYPE) * dimension * numCenters) ; for (x = 0 ; x < numData ; ++x) { minDistances[x] = (TYPE) VL_INFINITY_D ; } /* select the first point at random */ x = vl_rand_uindex (rand, numData) ; c = 0 ; while (1) { TYPE energy = 0 ; TYPE acc = 0 ; TYPE thresh = (TYPE) vl_rand_real1 (rand) ; memcpy ((TYPE*)self->centers + c * dimension, data + x * dimension, sizeof(TYPE) * dimension) ; c ++ ; if (c == numCenters) break ; VL_XCAT(vl_eval_vector_comparison_on_all_pairs_, SFX) (distances, dimension, (TYPE*)self->centers + (c - 1) * dimension, 1, data, numData, distFn) ; for (x = 0 ; x < numData ; ++x) { minDistances[x] = VL_MIN(minDistances[x], distances[x]) ; energy += minDistances[x] ; } for (x = 0 ; x < numData - 1 ; ++x) { acc += minDistances[x] ; if (acc >= thresh * energy) break ; } } vl_free(distances) ; vl_free(minDistances) ; } /* ---------------------------------------------------------------- */ /* Quantization */ /* ---------------------------------------------------------------- */ static void VL_XCAT(_vl_kmeans_quantize_, SFX) (VlKMeans * self, vl_uint32 * assignments, TYPE * distances, TYPE const * data, vl_size numData) { vl_index i ; #if (FLT == VL_TYPE_FLOAT) VlFloatVectorComparisonFunction distFn = vl_get_vector_comparison_function_f(self->distance) ; #else VlDoubleVectorComparisonFunction distFn = vl_get_vector_comparison_function_d(self->distance) ; #endif #ifdef _OPENMP #pragma omp parallel default(none) \ shared(self, distances, assignments, numData, distFn, data) \ num_threads(vl_get_max_threads()) #endif { /* vl_malloc cannot be used here if mapped to MATLAB malloc */ TYPE * distanceToCenters = malloc(sizeof(TYPE) * self->numCenters) ; #ifdef _OPENMP #pragma omp for #endif for (i = 0 ; i < (signed)numData ; ++i) { vl_uindex k ; TYPE bestDistance = (TYPE) VL_INFINITY_D ; VL_XCAT(vl_eval_vector_comparison_on_all_pairs_, SFX)(distanceToCenters, self->dimension, data + self->dimension * i, 1, (TYPE*)self->centers, self->numCenters, distFn) ; for (k = 0 ; k < self->numCenters ; ++k) { if (distanceToCenters[k] < bestDistance) { bestDistance = distanceToCenters[k] ; assignments[i] = (vl_uint32)k ; } } if (distances) distances[i] = bestDistance ; } free(distanceToCenters) ; } } /* 
---------------------------------------------------------------- */ /* ANN quantization */ /* ---------------------------------------------------------------- */ static void VL_XCAT(_vl_kmeans_quantize_ann_, SFX) (VlKMeans * self, vl_uint32 * assignments, TYPE * distances, TYPE const * data, vl_size numData, vl_bool update) { #if (FLT == VL_TYPE_FLOAT) VlFloatVectorComparisonFunction distFn = vl_get_vector_comparison_function_f(self->distance) ; #else VlDoubleVectorComparisonFunction distFn = vl_get_vector_comparison_function_d(self->distance) ; #endif VlKDForest * forest = vl_kdforest_new(self->dataType,self->dimension,self->numTrees, self->distance) ; vl_kdforest_set_max_num_comparisons(forest,self->maxNumComparisons); vl_kdforest_set_thresholding_method(forest,VL_KDTREE_MEDIAN); vl_kdforest_build(forest,self->numCenters,self->centers); #ifdef _OPENMP #pragma omp parallel default(none) \ num_threads(vl_get_max_threads()) \ shared(self, forest, update, assignments, distances, data, numData, distFn) #endif { VlKDForestNeighbor neighbor ; VlKDForestSearcher * searcher ; vl_index x; #ifdef _OPENMP #pragma omp critical #endif searcher = vl_kdforest_new_searcher (forest) ; #ifdef _OPENMP #pragma omp for #endif for(x = 0 ; x < (signed)numData ; ++x) { vl_kdforestsearcher_query (searcher, &neighbor, 1, (TYPE const *) (data + x*self->dimension)); if (distances) { if(!update) { distances[x] = (TYPE) neighbor.distance; assignments[x] = (vl_uint32) neighbor.index ; } else { TYPE prevDist = (TYPE) distFn(self->dimension, data + self->dimension * x, (TYPE*)self->centers + self->dimension *assignments[x]); if (prevDist > (TYPE) neighbor.distance) { distances[x] = (TYPE) neighbor.distance ; assignments[x] = (vl_uint32) neighbor.index ; } else { distances[x] = prevDist ; } } } else { assignments[x] = (vl_uint32) neighbor.index ; } } /* end for */ } /* end of parallel region */ vl_kdforest_delete(forest); } /* ---------------------------------------------------------------- */ /* Helper functions */ /* ---------------------------------------------------------------- */ /* The sorting routine is used to find increasing permutation of each * data dimension. This is used to quickly find the median for l1 * distance clustering. 
*/

VL_INLINE TYPE
VL_XCAT3(_vl_kmeans_, SFX, _qsort_cmp)
(VlKMeansSortWrapper * array, vl_uindex indexA, vl_uindex indexB)
{
  return
    ((TYPE*)array->data) [array->permutation[indexA] * array->stride]
    -
    ((TYPE*)array->data) [array->permutation[indexB] * array->stride] ;
}

VL_INLINE void
VL_XCAT3(_vl_kmeans_, SFX, _qsort_swap)
(VlKMeansSortWrapper * array, vl_uindex indexA, vl_uindex indexB)
{
  vl_uint32 tmp = array->permutation[indexA] ;
  array->permutation[indexA] = array->permutation[indexB] ;
  array->permutation[indexB] = tmp ;
}

#define VL_QSORT_prefix VL_XCAT3(_vl_kmeans_, SFX, _qsort)
#define VL_QSORT_array VlKMeansSortWrapper*
#define VL_QSORT_cmp VL_XCAT3(_vl_kmeans_, SFX, _qsort_cmp)
#define VL_QSORT_swap VL_XCAT3(_vl_kmeans_, SFX, _qsort_swap)
#include "qsort-def.h"

static void
VL_XCAT(_vl_kmeans_sort_data_helper_, SFX)
(VlKMeans * self, vl_uint32 * permutations, TYPE const * data, vl_size numData)
{
  vl_uindex d, x ;

  for (d = 0 ; d < self->dimension ; ++d) {
    VlKMeansSortWrapper array ;
    array.permutation = permutations + d * numData ;
    array.data = data + d ;
    array.stride = self->dimension ;
    for (x = 0 ; x < numData ; ++x) {
      array.permutation[x] = (vl_uint32)x ;
    }
    VL_XCAT3(_vl_kmeans_, SFX, _qsort_sort)(&array, numData) ;
  }
}

/* ---------------------------------------------------------------- */
/* Lloyd refinement */
/* ---------------------------------------------------------------- */

static double
VL_XCAT(_vl_kmeans_refine_centers_lloyd_, SFX)
(VlKMeans * self,
 TYPE const * data,
 vl_size numData)
{
  vl_size c, d, x, iteration ;
  double previousEnergy = VL_INFINITY_D ;
  double initialEnergy = VL_INFINITY_D ;
  double energy ;
  TYPE * distances = vl_malloc (sizeof(TYPE) * numData) ;

  vl_uint32 * assignments = vl_malloc (sizeof(vl_uint32) * numData) ;
  vl_size * clusterMasses = vl_malloc (sizeof(vl_size) * numData) ;
  vl_uint32 * permutations = NULL ;
  vl_size * numSeenSoFar = NULL ;
  VlRand * rand = vl_get_rand () ;
  vl_size totNumRestartedCenters = 0 ;
  vl_size numRestartedCenters = 0 ;

  if (self->distance == VlDistanceL1) {
    permutations = vl_malloc(sizeof(vl_uint32) * numData * self->dimension) ;
    numSeenSoFar = vl_malloc(sizeof(vl_size) * self->numCenters) ;
    VL_XCAT(_vl_kmeans_sort_data_helper_, SFX)(self, permutations, data, numData) ;
  }

  for (energy = VL_INFINITY_D, iteration = 0; 1 ; ++ iteration) {

    /* assign data to clusters */
    VL_XCAT(_vl_kmeans_quantize_, SFX)(self, assignments, distances, data, numData) ;

    /* compute energy */
    energy = 0 ;
    for (x = 0 ; x < numData ; ++x) energy += distances[x] ;
    if (self->verbosity) {
      VL_PRINTF("kmeans: Lloyd iter %d: energy = %g\n", iteration, energy) ;
    }

    /* check termination conditions */
    if (iteration >= self->maxNumIterations) {
      if (self->verbosity) {
        VL_PRINTF("kmeans: Lloyd terminating because maximum number of iterations reached\n") ;
      }
      break ;
    }
    if (energy == previousEnergy) {
      if (self->verbosity) {
        VL_PRINTF("kmeans: Lloyd terminating because the algorithm fully converged\n") ;
      }
      break ;
    }

    if (iteration == 0) {
      initialEnergy = energy ;
    } else {
      double eps = (previousEnergy - energy) / (initialEnergy - energy) ;
      if (eps < self->minEnergyVariation) {
        if (self->verbosity) {
          VL_PRINTF("kmeans: Lloyd terminating because the energy relative variation was less than %f\n", self->minEnergyVariation) ;
        }
        break ;
      }
    }

    /* begin next iteration */
    previousEnergy = energy ;

    /* update clusters */
    memset(clusterMasses, 0, sizeof(vl_size) * numData) ;
    for (x = 0 ; x < numData ; ++x) {
      clusterMasses[assignments[x]] ++ ;
    }

    numRestartedCenters = 0 ;
    switch (self->distance) {
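      /* Center update: for the L2 cost each center becomes the mean of the
         points assigned to it; for the L1 cost it becomes the per-dimension
         median, located by scanning each dimension in presorted order.
         Centers left with no points are restarted from a random data
         point. */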
case VlDistanceL2: memset(self->centers, 0, sizeof(TYPE) * self->dimension * self->numCenters) ; for (x = 0 ; x < numData ; ++x) { TYPE * cpt = (TYPE*)self->centers + assignments[x] * self->dimension ; TYPE const * xpt = data + x * self->dimension ; for (d = 0 ; d < self->dimension ; ++d) { cpt[d] += xpt[d] ; } } for (c = 0 ; c < self->numCenters ; ++c) { TYPE * cpt = (TYPE*)self->centers + c * self->dimension ; if (clusterMasses[c] > 0) { TYPE mass = clusterMasses[c] ; for (d = 0 ; d < self->dimension ; ++d) { cpt[d] /= mass ; } } else { vl_uindex x = vl_rand_uindex(rand, numData) ; numRestartedCenters ++ ; for (d = 0 ; d < self->dimension ; ++d) { cpt[d] = data[x * self->dimension + d] ; } } } break ; case VlDistanceL1: for (d = 0 ; d < self->dimension ; ++d) { vl_uint32 * perm = permutations + d * numData ; memset(numSeenSoFar, 0, sizeof(vl_size) * self->numCenters) ; for (x = 0; x < numData ; ++x) { c = assignments[perm[x]] ; if (2 * numSeenSoFar[c] < clusterMasses[c]) { ((TYPE*)self->centers) [d + c * self->dimension] = data [d + perm[x] * self->dimension] ; } numSeenSoFar[c] ++ ; } /* restart the centers as required */ for (c = 0 ; c < self->numCenters ; ++c) { if (clusterMasses[c] == 0) { TYPE * cpt = (TYPE*)self->centers + c * self->dimension ; vl_uindex x = vl_rand_uindex(rand, numData) ; numRestartedCenters ++ ; for (d = 0 ; d < self->dimension ; ++d) { cpt[d] = data[x * self->dimension + d] ; } } } } break ; default: abort(); } /* done compute centers */ totNumRestartedCenters += numRestartedCenters ; if (self->verbosity && numRestartedCenters) { VL_PRINTF("kmeans: Lloyd iter %d: restarted %d centers\n", iteration, numRestartedCenters) ; } } /* next Lloyd iteration */ if (permutations) { vl_free(permutations) ; } if (numSeenSoFar) { vl_free(numSeenSoFar) ; } vl_free(distances) ; vl_free(assignments) ; vl_free(clusterMasses) ; return energy ; } static double VL_XCAT(_vl_kmeans_update_center_distances_, SFX) (VlKMeans * self) { #if (FLT == VL_TYPE_FLOAT) VlFloatVectorComparisonFunction distFn = vl_get_vector_comparison_function_f(self->distance) ; #else VlDoubleVectorComparisonFunction distFn = vl_get_vector_comparison_function_d(self->distance) ; #endif if (! 
self->centerDistances) { self->centerDistances = vl_malloc (sizeof(TYPE) * self->numCenters * self->numCenters) ; } VL_XCAT(vl_eval_vector_comparison_on_all_pairs_, SFX)(self->centerDistances, self->dimension, self->centers, self->numCenters, NULL, 0, distFn) ; return self->numCenters * (self->numCenters - 1) / 2 ; } static double VL_XCAT(_vl_kmeans_refine_centers_ann_, SFX) (VlKMeans * self, TYPE const * data, vl_size numData) { vl_size c, d, x, iteration ; double initialEnergy = VL_INFINITY_D ; double previousEnergy = VL_INFINITY_D ; double energy ; vl_uint32 * permutations = NULL ; vl_size * numSeenSoFar = NULL ; VlRand * rand = vl_get_rand () ; vl_size totNumRestartedCenters = 0 ; vl_size numRestartedCenters = 0 ; vl_uint32 * assignments = vl_malloc (sizeof(vl_uint32) * numData) ; vl_size * clusterMasses = vl_malloc (sizeof(vl_size) * numData) ; TYPE * distances = vl_malloc (sizeof(TYPE) * numData) ; if (self->distance == VlDistanceL1) { permutations = vl_malloc(sizeof(vl_uint32) * numData * self->dimension) ; numSeenSoFar = vl_malloc(sizeof(vl_size) * self->numCenters) ; VL_XCAT(_vl_kmeans_sort_data_helper_, SFX)(self, permutations, data, numData) ; } for (energy = VL_INFINITY_D, iteration = 0; 1 ; ++ iteration) { /* assign data to cluters */ VL_XCAT(_vl_kmeans_quantize_ann_, SFX)(self, assignments, distances, data, numData, iteration > 0) ; /* compute energy */ energy = 0 ; for (x = 0 ; x < numData ; ++x) energy += distances[x] ; if (self->verbosity) { VL_PRINTF("kmeans: ANN iter %d: energy = %g\n", iteration, energy) ; } /* check termination conditions */ if (iteration >= self->maxNumIterations) { if (self->verbosity) { VL_PRINTF("kmeans: ANN terminating because the maximum number of iterations has been reached\n") ; } break ; } if (energy == previousEnergy) { if (self->verbosity) { VL_PRINTF("kmeans: ANN terminating because the algorithm fully converged\n") ; } break ; } if (iteration == 0) { initialEnergy = energy ; } else { double eps = (previousEnergy - energy) / (initialEnergy - energy) ; if (eps < self->minEnergyVariation) { if (self->verbosity) { VL_PRINTF("kmeans: ANN terminating because the energy relative variation was less than %f\n", self->minEnergyVariation) ; } break ; } } /* begin next iteration */ previousEnergy = energy ; /* update clusters */ memset(clusterMasses, 0, sizeof(vl_size) * numData) ; for (x = 0 ; x < numData ; ++x) { clusterMasses[assignments[x]] ++ ; } numRestartedCenters = 0 ; switch (self->distance) { case VlDistanceL2: memset(self->centers, 0, sizeof(TYPE) * self->dimension * self->numCenters) ; for (x = 0 ; x < numData ; ++x) { TYPE * cpt = (TYPE*)self->centers + assignments[x] * self->dimension ; TYPE const * xpt = data + x * self->dimension ; for (d = 0 ; d < self->dimension ; ++d) { cpt[d] += xpt[d] ; } } for (c = 0 ; c < self->numCenters ; ++c) { TYPE * cpt = (TYPE*)self->centers + c * self->dimension ; if (clusterMasses[c] > 0) { TYPE mass = clusterMasses[c] ; for (d = 0 ; d < self->dimension ; ++d) { cpt[d] /= mass ; } } else { vl_uindex x = vl_rand_uindex(rand, numData) ; numRestartedCenters ++ ; for (d = 0 ; d < self->dimension ; ++d) { cpt[d] = data[x * self->dimension + d] ; } } } break ; case VlDistanceL1: for (d = 0 ; d < self->dimension ; ++d) { vl_uint32 * perm = permutations + d * numData ; memset(numSeenSoFar, 0, sizeof(vl_size) * self->numCenters) ; for (x = 0; x < numData ; ++x) { c = assignments[perm[x]] ; if (2 * numSeenSoFar[c] < clusterMasses[c]) { ((TYPE*)self->centers) [d + c * self->dimension] = data [d + perm[x] * 
self->dimension] ; } numSeenSoFar[c] ++ ; } /* restart the centers as required */ for (c = 0 ; c < self->numCenters ; ++c) { if (clusterMasses[c] == 0) { TYPE * cpt = (TYPE*)self->centers + c * self->dimension ; vl_uindex x = vl_rand_uindex(rand, numData) ; numRestartedCenters ++ ; for (d = 0 ; d < self->dimension ; ++d) { cpt[d] = data[x * self->dimension + d] ; } } } } break ; default: VL_PRINT("bad distance set: %d\n",self->distance); abort(); } /* done compute centers */ totNumRestartedCenters += numRestartedCenters ; if (self->verbosity && numRestartedCenters) { VL_PRINTF("kmeans: ANN iter %d: restarted %d centers\n", iteration, numRestartedCenters) ; } } if (permutations) { vl_free(permutations) ; } if (numSeenSoFar) { vl_free(numSeenSoFar) ; } vl_free(distances) ; vl_free(assignments) ; vl_free(clusterMasses) ; return energy ; } /* ---------------------------------------------------------------- */ /* Elkan refinement */ /* ---------------------------------------------------------------- */ static double VL_XCAT(_vl_kmeans_refine_centers_elkan_, SFX) (VlKMeans * self, TYPE const * data, vl_size numData) { vl_size d, iteration ; vl_index x ; vl_uint32 c, j ; vl_bool allDone ; TYPE * distances = vl_malloc (sizeof(TYPE) * numData) ; vl_uint32 * assignments = vl_malloc (sizeof(vl_uint32) * numData) ; vl_size * clusterMasses = vl_malloc (sizeof(vl_size) * numData) ; VlRand * rand = vl_get_rand () ; #if (FLT == VL_TYPE_FLOAT) VlFloatVectorComparisonFunction distFn = vl_get_vector_comparison_function_f(self->distance) ; #else VlDoubleVectorComparisonFunction distFn = vl_get_vector_comparison_function_d(self->distance) ; #endif TYPE * nextCenterDistances = vl_malloc (sizeof(TYPE) * self->numCenters) ; TYPE * pointToClosestCenterUB = vl_malloc (sizeof(TYPE) * numData) ; vl_bool * pointToClosestCenterUBIsStrict = vl_malloc (sizeof(vl_bool) * numData) ; TYPE * pointToCenterLB = vl_malloc (sizeof(TYPE) * numData * self->numCenters) ; TYPE * newCenters = vl_malloc(sizeof(TYPE) * self->dimension * self->numCenters) ; TYPE * centerToNewCenterDistances = vl_malloc (sizeof(TYPE) * self->numCenters) ; vl_uint32 * permutations = NULL ; vl_size * numSeenSoFar = NULL ; double energy ; vl_size totDistanceComputationsToInit = 0 ; vl_size totDistanceComputationsToRefreshUB = 0 ; vl_size totDistanceComputationsToRefreshLB = 0 ; vl_size totDistanceComputationsToRefreshCenterDistances = 0 ; vl_size totDistanceComputationsToNewCenters = 0 ; vl_size totDistanceComputationsToFinalize = 0 ; vl_size totNumRestartedCenters = 0 ; if (self->distance == VlDistanceL1) { permutations = vl_malloc(sizeof(vl_uint32) * numData * self->dimension) ; numSeenSoFar = vl_malloc(sizeof(vl_size) * self->numCenters) ; VL_XCAT(_vl_kmeans_sort_data_helper_, SFX)(self, permutations, data, numData) ; } /* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ /* Initialization */ /* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ /* An iteration is: get_new_centers + reassign + get_energy. 
This counts as iteration 0, where get_new_centers is assumed to be
     performed before calling the train function by the initialization
     function */

  /* update distances between centers */
  totDistanceComputationsToInit +=
    VL_XCAT(_vl_kmeans_update_center_distances_, SFX)(self) ;

  /* assign points to the initial centers and initialize bounds */
  memset(pointToCenterLB, 0, sizeof(TYPE) * self->numCenters * numData) ;
  for (x = 0 ; x < (signed)numData ; ++x) {
    TYPE distance ;

    /* do the first center */
    assignments[x] = 0 ;
    distance = distFn(self->dimension,
                      data + x * self->dimension,
                      (TYPE*)self->centers + 0) ;
    pointToClosestCenterUB[x] = distance ;
    pointToClosestCenterUBIsStrict[x] = VL_TRUE ;
    pointToCenterLB[0 + x * self->numCenters] = distance ;
    totDistanceComputationsToInit += 1 ;

    /* do other centers */
    for (c = 1 ; c < self->numCenters ; ++c) {

      /* Can skip if the center assigned so far is twice as close
         as its distance to the center under consideration */
      if (((self->distance == VlDistanceL1) ? 2.0 : 4.0) *
          pointToClosestCenterUB[x] <=
          ((TYPE*)self->centerDistances)
          [c + assignments[x] * self->numCenters]) {
        continue ;
      }

      distance = distFn(self->dimension,
                        data + x * self->dimension,
                        (TYPE*)self->centers + c * self->dimension) ;
      pointToCenterLB[c + x * self->numCenters] = distance ;
      totDistanceComputationsToInit += 1 ;
      if (distance < pointToClosestCenterUB[x]) {
        pointToClosestCenterUB[x] = distance ;
        assignments[x] = c ;
      }
    }
  }

  /* compute UB on energy */
  energy = 0 ;
  for (x = 0 ; x < (signed)numData ; ++x) {
    energy += pointToClosestCenterUB[x] ;
  }

  if (self->verbosity) {
    VL_PRINTF("kmeans: Elkan iter 0: energy = %g, dist. calc. = %d\n",
              energy, totDistanceComputationsToInit) ;
  }

  /* #define SANITY*/
#ifdef SANITY
  {
    int xx ; int cc ;
    TYPE tol = 1e-5 ;
    VL_PRINTF("inconsistencies after initial assignments:\n");
    for (xx = 0 ; xx < numData ; ++xx) {
      for (cc = 0 ; cc < self->numCenters ; ++cc) {
        TYPE a = pointToCenterLB[cc + xx * self->numCenters] ;
        TYPE b = distFn(self->dimension,
                        data + self->dimension * xx,
                        (TYPE*)self->centers + self->dimension * cc) ;
        if (cc == assignments[xx]) {
          TYPE z = pointToClosestCenterUB[xx] ;
          if (z+tol<b) VL_PRINTF("UB %d %d = %f < %f\n", cc, xx, z, b) ;
        }
        if (a>b+tol) VL_PRINTF("LB %d %d = %f > %f\n", cc, xx, a, b) ;
      }
    }
  }
#endif

  /* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
  /* Iterations */
  /* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */

  for (iteration = 1 ; 1; ++iteration) {

    vl_size numDistanceComputationsToRefreshUB = 0 ;
    vl_size numDistanceComputationsToRefreshLB = 0 ;
    vl_size numDistanceComputationsToRefreshCenterDistances = 0 ;
    vl_size numDistanceComputationsToNewCenters = 0 ;
    vl_size numRestartedCenters = 0 ;

    /* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
    /* Compute new centers */
    /* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */

    memset(clusterMasses, 0, sizeof(vl_size) * numData) ;
    for (x = 0 ; x < (signed)numData ; ++x) {
      clusterMasses[assignments[x]] ++ ;
    }

    switch (self->distance) {
      case VlDistanceL2:
        memset(newCenters, 0, sizeof(TYPE) * self->dimension * self->numCenters) ;
        for (x = 0 ; x < (signed)numData ; ++x) {
          TYPE * cpt = newCenters + assignments[x] * self->dimension ;
          TYPE const * xpt = data + x * self->dimension ;
          for (d = 0 ; d < self->dimension ; ++d) {
            cpt[d] += xpt[d] ;
          }
        }
        for (c = 0 ; c < self->numCenters ; ++c) {
          TYPE * cpt = newCenters + c * self->dimension ;
          if (clusterMasses[c] > 0) {
            TYPE mass = clusterMasses[c] ;
            for (d = 0 ; d < self->dimension ; ++d) {
              cpt[d]
/= mass ;
            }
          } else {
            /* restart the center */
            vl_uindex x = vl_rand_uindex(rand, numData) ;
            numRestartedCenters ++ ;
            for (d = 0 ; d < self->dimension ; ++d) {
              cpt[d] = data[x * self->dimension + d] ;
            }
          }
        }
        break ;
      case VlDistanceL1:
        for (d = 0 ; d < self->dimension ; ++d) {
          vl_uint32 * perm = permutations + d * numData ;
          memset(numSeenSoFar, 0, sizeof(vl_size) * self->numCenters) ;
          for (x = 0; x < (signed)numData ; ++x) {
            c = assignments[perm[x]] ;
            if (2 * numSeenSoFar[c] < clusterMasses[c]) {
              newCenters [d + c * self->dimension] =
                data [d + perm[x] * self->dimension] ;
            }
            numSeenSoFar[c] ++ ;
          }
        }
        /* restart the centers as required */
        for (c = 0 ; c < self->numCenters ; ++c) {
          if (clusterMasses[c] == 0) {
            TYPE * cpt = newCenters + c * self->dimension ;
            vl_uindex x = vl_rand_uindex(rand, numData) ;
            numRestartedCenters ++ ;
            for (d = 0 ; d < self->dimension ; ++d) {
              cpt[d] = data[x * self->dimension + d] ;
            }
          }
        }
        break ;
      default:
        abort();
    } /* done compute centers */

    /* compute the distance from the old centers to the new centers */
    for (c = 0 ; c < self->numCenters ; ++c) {
      TYPE distance = distFn(self->dimension,
                             newCenters + c * self->dimension,
                             (TYPE*)self->centers + c * self->dimension) ;
      centerToNewCenterDistances[c] = distance ;
      numDistanceComputationsToNewCenters += 1 ;
    }

    /* make the new centers current */
    {
      TYPE * tmp = self->centers ;
      self->centers = newCenters ;
      newCenters = tmp ;
    }

    /* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
    /* Reassign points to centers */
    /* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */

    /* Update distances between centers. */
    numDistanceComputationsToRefreshCenterDistances +=
      VL_XCAT(_vl_kmeans_update_center_distances_, SFX)(self) ;

    for (c = 0 ; c < self->numCenters ; ++c) {
      nextCenterDistances[c] = (TYPE) VL_INFINITY_D ;
      for (j = 0 ; j < self->numCenters ; ++j) {
        if (j == c) continue ;
        nextCenterDistances[c] = VL_MIN(nextCenterDistances[c],
                                        ((TYPE*)self->centerDistances)
                                        [j + c * self->numCenters]) ;
      }
    }

    /* Update upper bounds on point-to-closest-center distances
       based on the center variation. */
    for (x = 0 ; x < (signed)numData ; ++x) {
      TYPE a = pointToClosestCenterUB[x] ;
      TYPE b = centerToNewCenterDistances[assignments[x]] ;
      if (self->distance == VlDistanceL1) {
        pointToClosestCenterUB[x] = a + b ;
      } else {
#if (FLT == VL_TYPE_FLOAT)
        TYPE sqrtab = sqrtf (a * b) ;
#else
        TYPE sqrtab = sqrt (a * b) ;
#endif
        pointToClosestCenterUB[x] = a + b + 2.0 * sqrtab ;
      }
      pointToClosestCenterUBIsStrict[x] = VL_FALSE ;
    }

    /* Update lower bounds on point-to-center distances
       based on the center variation.
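       For the L1 metric the new bound is a - b, clamped at zero. For the
       squared L2 metric used here, let a be the old lower bound on
       d2(x, c_old) and b = d2(c_old, c_new) the squared distance the center
       moved; the triangle inequality on the unsquared distances gives
       d(x, c_new) >= sqrt(a) - sqrt(b), hence
       d2(x, c_new) >= a + b - 2*sqrt(a*b), which is the expression computed
       below. The expansion (sqrt(a) + sqrt(b))^2 justifies the upper-bound
       update above in the same way.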
*/
#if defined(_OPENMP)
#pragma omp parallel for default(shared) private(x,c) num_threads(vl_get_max_threads())
#endif
    for (x = 0 ; x < (signed)numData ; ++x) {
      for (c = 0 ; c < self->numCenters ; ++c) {
        TYPE a = pointToCenterLB[c + x * self->numCenters] ;
        TYPE b = centerToNewCenterDistances[c] ;
        if (a < b) {
          pointToCenterLB[c + x * self->numCenters] = 0 ;
        } else {
          if (self->distance == VlDistanceL1) {
            pointToCenterLB[c + x * self->numCenters] = a - b ;
          } else {
#if (FLT == VL_TYPE_FLOAT)
            TYPE sqrtab = sqrtf (a * b) ;
#else
            TYPE sqrtab = sqrt (a * b) ;
#endif
            pointToCenterLB[c + x * self->numCenters] = a + b - 2.0 * sqrtab ;
          }
        }
      }
    }

#ifdef SANITY
    {
      int xx ; int cc ;
      TYPE tol = 1e-5 ;
      VL_PRINTF("inconsistencies before assignments:\n");
      for (xx = 0 ; xx < numData ; ++xx) {
        for (cc = 0 ; cc < self->numCenters ; ++cc) {
          TYPE a = pointToCenterLB[cc + xx * self->numCenters] ;
          TYPE b = distFn(self->dimension,
                          data + self->dimension * xx,
                          (TYPE*)self->centers + self->dimension * cc) ;
          if (cc == assignments[xx]) {
            TYPE z = pointToClosestCenterUB[xx] ;
            if (z+tol<b) VL_PRINTF("UB %d %d = %f < %f\n", cc, xx, z, b) ;
          }
          if (a>b+tol) VL_PRINTF("LB %d %d = %f > %f (assign = %d)\n", cc, xx, a, b, assignments[xx]) ;
        }
      }
    }
#endif

    /* Scan the data and do the reassignments. Use the bounds to
       skip as many point-to-center distance calculations as possible. */
    allDone = VL_TRUE ;

#if defined(_OPENMP)
#pragma omp parallel for \
            default(none) \
            shared(self,numData, \
                   pointToClosestCenterUB,pointToCenterLB, \
                   nextCenterDistances,pointToClosestCenterUBIsStrict, \
                   assignments,data,distFn,allDone) \
            private(c,x) \
            reduction(+:numDistanceComputationsToRefreshUB,numDistanceComputationsToRefreshLB) \
            num_threads(vl_get_max_threads())
#endif
    for (x = 0 ; x < (signed)numData ; ++ x) {
      /* A point x sticks with its current center assignments[x] if
         the UB on d(x, c[assignments[x]]) is not larger than half
         the distance of c[assignments[x]] to any other center c. */
      if (((self->distance == VlDistanceL1) ? 2.0 : 4.0) *
          pointToClosestCenterUB[x] <= nextCenterDistances[assignments[x]]) {
        continue ;
      }

      for (c = 0 ; c < self->numCenters ; ++c) {
        vl_uint32 cx = assignments[x] ;
        TYPE distance ;

        /* The point is not reassigned to a given center c if either:

           0 - c is already the assigned center;
           1 - the UB on d(x, c[assignments[x]]) is smaller than half
               the distance of c[assignments[x]] to c, OR
           2 - the UB on d(x, c[assignments[x]]) is smaller than the
               LB on the distance of x to c. */
        if (cx == c) {
          continue ;
        }
        if (((self->distance == VlDistanceL1) ? 2.0 : 4.0) *
            pointToClosestCenterUB[x] <= ((TYPE*)self->centerDistances)
            [c + cx * self->numCenters]) {
          continue ;
        }
        if (pointToClosestCenterUB[x] <= pointToCenterLB
            [c + x * self->numCenters]) {
          continue ;
        }

        /* If the UB is loose, try recomputing it and test again */
        if (! pointToClosestCenterUBIsStrict[x]) {
          distance = distFn(self->dimension,
                            data + self->dimension * x,
                            (TYPE*)self->centers + self->dimension * cx) ;
          pointToClosestCenterUB[x] = distance ;
          pointToClosestCenterUBIsStrict[x] = VL_TRUE ;
          pointToCenterLB[cx + x * self->numCenters] = distance ;
          numDistanceComputationsToRefreshUB += 1 ;

          if (((self->distance == VlDistanceL1) ? 2.0 : 4.0) *
              pointToClosestCenterUB[x] <= ((TYPE*)self->centerDistances)
              [c + cx * self->numCenters]) {
            continue ;
          }
          if (pointToClosestCenterUB[x] <= pointToCenterLB
              [c + x * self->numCenters]) {
            continue ;
          }
        }

        /* Now the UB is strict (equal to d(x, assignments[x])), but
           we still could not exclude that x should be reassigned to c.
           We therefore compute the distance, update the LB, and check
           whether a reassignment must be made. */
        distance = distFn(self->dimension,
                          data + x * self->dimension,
                          (TYPE*)self->centers + c * self->dimension) ;
        numDistanceComputationsToRefreshLB += 1 ;
        pointToCenterLB[c + x * self->numCenters] = distance ;

        if (distance < pointToClosestCenterUB[x]) {
          assignments[x] = c ;
          pointToClosestCenterUB[x] = distance ;
          allDone = VL_FALSE ;
          /* the UB strict flag is already set here */
        }
      } /* assign center */
    } /* next data point */

    totDistanceComputationsToRefreshUB += numDistanceComputationsToRefreshUB ;
    totDistanceComputationsToRefreshLB += numDistanceComputationsToRefreshLB ;
    totDistanceComputationsToRefreshCenterDistances += numDistanceComputationsToRefreshCenterDistances ;
    totDistanceComputationsToNewCenters += numDistanceComputationsToNewCenters ;
    totNumRestartedCenters += numRestartedCenters ;

#ifdef SANITY
    {
      int xx ; int cc ;
      TYPE tol = 1e-5 ;
      VL_PRINTF("inconsistencies after assignments:\n");
      for (xx = 0 ; xx < numData ; ++xx) {
        for (cc = 0 ; cc < self->numCenters ; ++cc) {
          TYPE a = pointToCenterLB[cc + xx * self->numCenters] ;
          TYPE b = distFn(self->dimension,
                          data + self->dimension * xx,
                          (TYPE*)self->centers + self->dimension * cc) ;
          if (cc == assignments[xx]) {
            TYPE z = pointToClosestCenterUB[xx] ;
            if (z+tol<b) VL_PRINTF("UB %d %d = %f < %f\n", cc, xx, z, b) ;
          }
          if (a>b+tol) VL_PRINTF("LB %d %d = %f > %f (assign = %d)\n", cc, xx, a, b, assignments[xx]) ;
        }
      }
    }
#endif

    /* compute UB on energy */
    energy = 0 ;
    for (x = 0 ; x < (signed)numData ; ++x) {
      energy += pointToClosestCenterUB[x] ;
    }

    if (self->verbosity) {
      vl_size numDistanceComputations =
        numDistanceComputationsToRefreshUB +
        numDistanceComputationsToRefreshLB +
        numDistanceComputationsToRefreshCenterDistances +
        numDistanceComputationsToNewCenters ;
      VL_PRINTF("kmeans: Elkan iter %d: energy <= %g, dist. calc. = %d\n",
                iteration, energy, numDistanceComputations) ;
      if (numRestartedCenters) {
        VL_PRINTF("kmeans: Elkan iter %d: restarted %d centers\n",
                  iteration, numRestartedCenters) ;
      }
      if (self->verbosity > 1) {
        VL_PRINTF("kmeans: Elkan iter %d: total dist. calc.
per type: " "UB: %.1f%% (%d), LB: %.1f%% (%d), " "intra_center: %.1f%% (%d), " "new_center: %.1f%% (%d)\n", iteration, 100.0 * numDistanceComputationsToRefreshUB / numDistanceComputations, numDistanceComputationsToRefreshUB, 100.0 *numDistanceComputationsToRefreshLB / numDistanceComputations, numDistanceComputationsToRefreshLB, 100.0 * numDistanceComputationsToRefreshCenterDistances / numDistanceComputations, numDistanceComputationsToRefreshCenterDistances, 100.0 * numDistanceComputationsToNewCenters / numDistanceComputations, numDistanceComputationsToNewCenters) ; } } /* check termination conditions */ if (iteration >= self->maxNumIterations) { if (self->verbosity) { VL_PRINTF("kmeans: Elkan terminating because maximum number of iterations reached\n") ; } break ; } if (allDone) { if (self->verbosity) { VL_PRINTF("kmeans: Elkan terminating because the algorithm fully converged\n") ; } break ; } } /* next Elkan iteration */ /* compute true energy */ energy = 0 ; for (x = 0 ; x < (signed)numData ; ++ x) { vl_uindex cx = assignments [x] ; energy += distFn(self->dimension, data + self->dimension * x, (TYPE*)self->centers + self->dimension * cx) ; totDistanceComputationsToFinalize += 1 ; } { vl_size totDistanceComputations = totDistanceComputationsToInit + totDistanceComputationsToRefreshUB + totDistanceComputationsToRefreshLB + totDistanceComputationsToRefreshCenterDistances + totDistanceComputationsToNewCenters + totDistanceComputationsToFinalize ; double saving = (double)totDistanceComputations / (iteration * self->numCenters * numData) ; if (self->verbosity) { VL_PRINTF("kmeans: Elkan: total dist. calc.: %d (%.2f %% of Lloyd)\n", totDistanceComputations, saving * 100.0) ; if (totNumRestartedCenters) { VL_PRINTF("kmeans: Elkan: there have been %d restarts\n", totNumRestartedCenters) ; } } if (self->verbosity > 1) { VL_PRINTF("kmeans: Elkan: total dist. calc. 
per type: "
                  "init: %.1f%% (%d), UB: %.1f%% (%d), LB: %.1f%% (%d), "
                  "intra_center: %.1f%% (%d), "
                  "new_center: %.1f%% (%d), "
                  "finalize: %.1f%% (%d)\n",
                  100.0 * totDistanceComputationsToInit / totDistanceComputations, totDistanceComputationsToInit,
                  100.0 * totDistanceComputationsToRefreshUB / totDistanceComputations, totDistanceComputationsToRefreshUB,
                  100.0 * totDistanceComputationsToRefreshLB / totDistanceComputations, totDistanceComputationsToRefreshLB,
                  100.0 * totDistanceComputationsToRefreshCenterDistances / totDistanceComputations, totDistanceComputationsToRefreshCenterDistances,
                  100.0 * totDistanceComputationsToNewCenters / totDistanceComputations, totDistanceComputationsToNewCenters,
                  100.0 * totDistanceComputationsToFinalize / totDistanceComputations, totDistanceComputationsToFinalize) ;
    }
  }

  if (permutations) { vl_free(permutations) ; }
  if (numSeenSoFar) { vl_free(numSeenSoFar) ; }
  vl_free(distances) ;
  vl_free(assignments) ;
  vl_free(clusterMasses) ;
  vl_free(nextCenterDistances) ;
  vl_free(pointToClosestCenterUB) ;
  vl_free(pointToClosestCenterUBIsStrict) ;
  vl_free(pointToCenterLB) ;
  vl_free(newCenters) ;
  vl_free(centerToNewCenterDistances) ;
  return energy ;
}

/* ---------------------------------------------------------------- */
static double
VL_XCAT(_vl_kmeans_refine_centers_, SFX)
(VlKMeans * self,
 TYPE const * data,
 vl_size numData)
{
  switch (self->algorithm) {
    case VlKMeansLloyd:
      return VL_XCAT(_vl_kmeans_refine_centers_lloyd_, SFX)(self, data, numData) ;
      break ;
    case VlKMeansElkan:
      return VL_XCAT(_vl_kmeans_refine_centers_elkan_, SFX)(self, data, numData) ;
      break ;
    case VlKMeansANN:
      return VL_XCAT(_vl_kmeans_refine_centers_ann_, SFX)(self, data, numData) ;
      break ;
    default:
      abort() ;
  }
}

/* VL_KMEANS_INSTANTIATING */
#else

#ifndef __DOXYGEN__
#define FLT VL_TYPE_FLOAT
#define TYPE float
#define SFX f
#define VL_KMEANS_INSTANTIATING
#include "kmeans.c"

#define FLT VL_TYPE_DOUBLE
#define TYPE double
#define SFX d
#define VL_KMEANS_INSTANTIATING
#include "kmeans.c"
#endif

/* VL_KMEANS_INSTANTIATING */
#endif

/* ================================================================ */
#ifndef VL_KMEANS_INSTANTIATING

/** ------------------------------------------------------------------
 ** @brief Set centers
 ** @param self KMeans object.
 ** @param centers centers to copy.
 ** @param dimension data dimension.
 ** @param numCenters number of centers.
 **/

void
vl_kmeans_set_centers
(VlKMeans * self,
 void const * centers,
 vl_size dimension,
 vl_size numCenters)
{
  vl_kmeans_reset (self) ;

  switch (self->dataType) {
    case VL_TYPE_FLOAT :
      _vl_kmeans_set_centers_f
        (self, (float const *)centers, dimension, numCenters) ;
      break ;
    case VL_TYPE_DOUBLE :
      _vl_kmeans_set_centers_d
        (self, (double const *)centers, dimension, numCenters) ;
      break ;
    default:
      abort() ;
  }
}

/** ------------------------------------------------------------------
 ** @brief Initialize centers by randomly sampling the data
 ** @param self KMeans object.
 ** @param data data to sample from.
 ** @param dimension data dimension.
 ** @param numData number of data points.
 ** @param numCenters number of centers.
 **
 ** The function initializes the KMeans centers by randomly sampling
 ** the data @a data.
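 ** Centers are drawn from a random permutation of the data points; a
 ** candidate that exactly duplicates an already accepted center is skipped
 ** whenever enough points remain to still collect @a numCenters centers.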
 **/

void
vl_kmeans_init_centers_with_rand_data
(VlKMeans * self,
 void const * data,
 vl_size dimension,
 vl_size numData,
 vl_size numCenters)
{
  vl_kmeans_reset (self) ;

  switch (self->dataType) {
    case VL_TYPE_FLOAT :
      _vl_kmeans_init_centers_with_rand_data_f
        (self, (float const *)data, dimension, numData, numCenters) ;
      break ;
    case VL_TYPE_DOUBLE :
      _vl_kmeans_init_centers_with_rand_data_d
        (self, (double const *)data, dimension, numData, numCenters) ;
      break ;
    default:
      abort() ;
  }
}

/** ------------------------------------------------------------------
 ** @brief Seed centers by the KMeans++ algorithm
 ** @param self KMeans object.
 ** @param data data to sample from.
 ** @param dimension data dimension.
 ** @param numData number of data points.
 ** @param numCenters number of centers.
 **/

void
vl_kmeans_init_centers_plus_plus
(VlKMeans * self,
 void const * data,
 vl_size dimension,
 vl_size numData,
 vl_size numCenters)
{
  vl_kmeans_reset (self) ;

  switch (self->dataType) {
    case VL_TYPE_FLOAT :
      _vl_kmeans_init_centers_plus_plus_f
        (self, (float const *)data, dimension, numData, numCenters) ;
      break ;
    case VL_TYPE_DOUBLE :
      _vl_kmeans_init_centers_plus_plus_d
        (self, (double const *)data, dimension, numData, numCenters) ;
      break ;
    default:
      abort() ;
  }
}

/** ------------------------------------------------------------------
 ** @brief Quantize data
 ** @param self KMeans object.
 ** @param assignments data to closest center assignments (output).
 ** @param distances data to closest center distance (output).
 ** @param data data to quantize.
 ** @param numData number of data points to quantize.
 **/

void
vl_kmeans_quantize
(VlKMeans * self,
 vl_uint32 * assignments,
 void * distances,
 void const * data,
 vl_size numData)
{
  switch (self->dataType) {
    case VL_TYPE_FLOAT :
      _vl_kmeans_quantize_f
        (self, assignments, distances, (float const *)data, numData) ;
      break ;
    case VL_TYPE_DOUBLE :
      _vl_kmeans_quantize_d
        (self, assignments, distances, (double const *)data, numData) ;
      break ;
    default:
      abort() ;
  }
}

/** ------------------------------------------------------------------
 ** @brief Quantize data using approximate nearest neighbours (ANN).
 ** @param self KMeans object.
 ** @param assignments data to centers assignments (output).
 ** @param distances data to closest center distance (output).
 ** @param data data to quantize.
 ** @param numData number of data points.
 ** @param update choose whether to update the current assignments.
 **
 ** The function uses an ANN procedure to compute the approximate
 ** nearest neighbours of the input data points.
 **
 ** Setting @a update to ::VL_TRUE will cause the algorithm
 ** to *update existing assignments*. This means that each
 ** element of @a assignments and @a distances is updated only if the
 ** ANN procedure can find a better assignment than the existing one.
 **/

void
vl_kmeans_quantize_ann
(VlKMeans * self,
 vl_uint32 * assignments,
 void * distances,
 void const * data,
 vl_size numData,
 vl_bool update)
{
  switch (self->dataType) {
    case VL_TYPE_FLOAT :
      _vl_kmeans_quantize_ann_f
        (self, assignments, distances, (float const *)data, numData, update) ;
      break ;
    case VL_TYPE_DOUBLE :
      _vl_kmeans_quantize_ann_d
        (self, assignments, distances, (double const *)data, numData, update) ;
      break ;
    default:
      abort() ;
  }
}

/** ------------------------------------------------------------------
 ** @brief Refine center locations.
 ** @param self KMeans object.
 ** @param data data to quantize.
 ** @param numData number of data points.
 ** @return K-means energy at the end of optimization.
 **
 ** The function calls the underlying K-means quantization algorithm
 ** (@ref VlKMeansAlgorithm) to quantize the specified data @a data.
 ** The function assumes that the cluster centers have already
 ** been assigned by using one of the seeding functions, or by
 ** setting them.
 **/

double
vl_kmeans_refine_centers
(VlKMeans * self,
 void const * data,
 vl_size numData)
{
  assert (self->centers) ;

  switch (self->dataType) {
    case VL_TYPE_FLOAT :
      return
        _vl_kmeans_refine_centers_f
        (self, (float const *)data, numData) ;
    case VL_TYPE_DOUBLE :
      return
        _vl_kmeans_refine_centers_d
        (self, (double const *)data, numData) ;
    default:
      abort() ;
  }
}

/** ------------------------------------------------------------------
 ** @brief Cluster data.
 ** @param self KMeans object.
 ** @param data data to quantize.
 ** @param dimension data dimension.
 ** @param numData number of data points.
 ** @param numCenters number of clusters.
 ** @return K-means energy at the end of optimization.
 **
 ** The function initializes the centers by using the initialization
 ** algorithm set by ::vl_kmeans_set_initialization and refines them
 ** by the quantization algorithm set by ::vl_kmeans_set_algorithm.
 ** The process is repeated one or more times (see
 ** ::vl_kmeans_set_num_repetitions) and the result with the smallest
 ** energy is retained.
 **/

double
vl_kmeans_cluster (VlKMeans * self,
                   void const * data,
                   vl_size dimension,
                   vl_size numData,
                   vl_size numCenters)
{
  vl_uindex repetition ;
  double bestEnergy = VL_INFINITY_D ;
  void * bestCenters = NULL ;

  for (repetition = 0 ; repetition < self->numRepetitions ; ++ repetition) {
    double energy ;
    double timeRef ;

    if (self->verbosity) {
      VL_PRINTF("kmeans: repetition %d of %d\n", repetition + 1, self->numRepetitions) ;
    }

    timeRef = vl_get_cpu_time() ;
    switch (self->initialization) {
      case VlKMeansRandomSelection :
        vl_kmeans_init_centers_with_rand_data (self,
                                               data, dimension, numData,
                                               numCenters) ;
        break ;
      case VlKMeansPlusPlus :
        vl_kmeans_init_centers_plus_plus (self,
                                          data, dimension, numData,
                                          numCenters) ;
        break ;
      default:
        abort() ;
    }

    if (self->verbosity) {
      VL_PRINTF("kmeans: K-means initialized in %.2f s\n",
                vl_get_cpu_time() - timeRef) ;
    }

    timeRef = vl_get_cpu_time () ;
    energy = vl_kmeans_refine_centers (self, data, numData) ;

    if (self->verbosity) {
      VL_PRINTF("kmeans: K-means terminated in %.2f s with energy %g\n",
                vl_get_cpu_time() - timeRef, energy) ;
    }

    /* copy centers to output if the current solution is the best so far */
    /* check repetition == 0 as well in case energy = NaN, which */
    /* can happen if the data contain NaNs */
    if (energy < bestEnergy || repetition == 0) {
      void * temp ;
      bestEnergy = energy ;

      if (bestCenters == NULL) {
        bestCenters = vl_malloc(vl_get_type_size(self->dataType) *
                                self->dimension *
                                self->numCenters) ;
      }

      /* swap buffers */
      temp = bestCenters ;
      bestCenters = self->centers ;
      self->centers = temp ;
    } /* better energy */
  } /* next repetition */

  vl_free (self->centers) ;
  self->centers = bestCenters ;
  return bestEnergy ;
}

/* VL_KMEANS_INSTANTIATING */
#endif

#undef SFX
#undef TYPE
#undef FLT
#undef VL_KMEANS_INSTANTIATING
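/* A minimal usage sketch of the driver API above (an illustration, not part
   of VLFeat itself): it clusters random single-precision data with kmeans++
   seeding and Lloyd refinement. It assumes only vl_kmeans_cluster and
   vl_kmeans_delete defined in this file plus the object constructor and the
   vl_kmeans_set_* configuration helpers declared in kmeans.h; error handling
   is omitted. */

#include <vl/kmeans.h>
#include <stdio.h>
#include <stdlib.h>

int main (void)
{
  vl_size dimension = 2 ;
  vl_size numData = 1000 ;
  vl_size numCenters = 5 ;
  vl_uindex i ;
  float * data = vl_malloc (sizeof(float) * dimension * numData) ;
  VlKMeans * kmeans ;
  double energy ;

  /* synthetic data in [0,1)^2 */
  for (i = 0 ; i < dimension * numData ; ++i) {
    data[i] = (float) rand () / RAND_MAX ;
  }

  kmeans = vl_kmeans_new (VL_TYPE_FLOAT, VlDistanceL2) ;
  vl_kmeans_set_algorithm (kmeans, VlKMeansLloyd) ;
  vl_kmeans_set_initialization (kmeans, VlKMeansPlusPlus) ;
  vl_kmeans_set_max_num_iterations (kmeans, 100) ;
  vl_kmeans_set_num_repetitions (kmeans, 3) ;

  energy = vl_kmeans_cluster (kmeans, data, dimension, numData, numCenters) ;
  printf ("final energy: %g\n", energy) ;

  vl_kmeans_delete (kmeans) ;
  vl_free (data) ;
  return 0 ;
}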
zcposv.c
/** * * @file * * PLASMA is a software package provided by: * University of Tennessee, US, * University of Manchester, UK. * * @precisions mixed zc -> ds * **/ #include "plasma.h" #include "plasma_async.h" #include "plasma_context.h" #include "plasma_descriptor.h" #include "plasma_internal.h" #include "plasma_types.h" #include "core_lapack.h" #include <math.h> #include <omp.h> /***************************************************************************//** * * @ingroup plasma_posv * * Computes the solution to a system of linear equations A * X = B, where A is * an n-by-n Hermitian positive definite matrix and X and B are n-by-nrhs matrices. * * plasma_zcposv first factorizes the matrix using plasma_cpotrf and uses * this factorization within an iterative refinement procedure to produce a * solution with COMPLEX*16 normwise backward error quality (see below). If * the approach fails the method falls back to a COMPLEX*16 factorization and * solve. * * The iterative refinement is not going to be a winning strategy if * the ratio COMPLEX performance over COMPLEX*16 performance is too * small. A reasonable strategy should take the number of right-hand * sides and the size of the matrix into account. This might be done * with a call to ILAENV in the future. Up to now, we always try * iterative refinement. * * The iterative refinement process is stopped if iter > itermax or * for all the RHS we have: Rnorm < sqrt(n)*Xnorm*Anorm*eps*BWDmax * where: * * - iter is the number of the current iteration in the iterative refinement * process * - Rnorm is the Infinity-norm of the residual * - Xnorm is the Infinity-norm of the solution * - Anorm is the Infinity-operator-norm of the matrix A * - eps is the machine epsilon returned by DLAMCH('Epsilon'). * The values itermax and BWDmax are fixed to 30 and 1.0D+00 respectively. * ******************************************************************************* * * @param[in] uplo * Specifies whether the matrix A is upper or lower triangular: * - PlasmaUpper: Upper triangle of A is stored; * - PlasmaLower: Lower triangle of A is stored. * * @param[in] n * The number of linear equations, i.e., the order of the matrix A. * n >= 0. * * @param[in] nrhs * The number of right hand sides, i.e., the number of columns of the * matrix B. nrhs >= 0. * * @param[in,out] pA * The n-by-n Hermitian positive definite coefficient matrix A. * If uplo = PlasmaUpper, the leading n-by-n upper triangular part of * A contains the upper triangular part of the matrix A, and the * strictly lower triangular part of A is not referenced. * If uplo = PlasmaLower, the leading n-by-n lower triangular part of * A contains the lower triangular part of the matrix A, and the * strictly upper triangular part of A is not referenced. * On exit, contains the lower Cholesky factor matrix L, * if uplo == PlasmaLower and upper Cholesky factor conj(L^T), * otherwise. * * @param[in] lda * The leading dimension of the array A. lda >= max(1,n). * * @param[in] pB * The n-by-nrhs matrix of right hand side matrix B. * This matrix remains unchanged. * * @param[in] ldb * The leading dimension of the array B. ldb >= max(1,n). * * @param[out] pX * If return value = 0, the n-by-nrhs solution matrix X. * * @param[in] ldx * The leading dimension of the array X. ldx >= max(1,n). * * @param[out] iter * The number of the iterations in the iterative refinement * process, needed for the convergence. If failed, it is set * to be -(1+itermax), where itermax = 30. 
* ******************************************************************************* * * @retval PlasmaSuccess successful exit * ******************************************************************************* * * @sa plasma_omp_zcposv * @sa plasma_dsposv * @sa plasma_zposv * ******************************************************************************/ int plasma_zcposv(plasma_enum_t uplo, int n, int nrhs, plasma_complex64_t *pA, int lda, plasma_complex64_t *pB, int ldb, plasma_complex64_t *pX, int ldx, int *iter) { // Get PLASMA context plasma_context_t *plasma = plasma_context_self(); if (plasma == NULL) { plasma_error("PLASMA not initialized"); return PlasmaErrorNotInitialized; } // Check input arguments if (uplo != PlasmaUpper && uplo != PlasmaLower) { plasma_error("illegal value of uplo"); return -1; } if (n < 0) { plasma_error("illegal value of n"); return -2; } if (nrhs < 0) { plasma_error("illegal value of nrhs"); return -3; } if (lda < imax(1, n)) { plasma_error("illegal value of lda"); return -5; } if (ldb < imax(1, n)) { plasma_error("illegal value of ldb"); return -7; } if (ldx < imax(1, n)) { plasma_error("illegal value of ldx"); return -9; } // Quick return *iter = 0; if (imin(n, nrhs) == 0) return PlasmaSuccess; // Set tiling parameters int nb = plasma->nb; // Create tile matrices plasma_desc_t A; plasma_desc_t B; plasma_desc_t X; int retval; retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb, n, n, 0, 0, n, n, &A); if (retval != PlasmaSuccess) { plasma_error("plasma_desc_general_create() failed"); return retval; } retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb, n, nrhs, 0, 0, n, nrhs, &B); if (retval != PlasmaSuccess) { plasma_error("plasma_desc_general_create() failed"); plasma_desc_destroy(&A); return retval; } retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb, n, nrhs, 0, 0, n, nrhs, &X); if (retval != PlasmaSuccess) { plasma_error("plasma_desc_general_create() failed"); plasma_desc_destroy(&A); plasma_desc_destroy(&B); return retval; } // Create additional tile matrices plasma_desc_t R, As, Xs; retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb, B.m, B.n, 0, 0, B.m, B.n, &R); if (retval != PlasmaSuccess) { plasma_error("plasma_desc_general_create() failed"); plasma_desc_destroy(&A); plasma_desc_destroy(&B); plasma_desc_destroy(&X); return retval; } retval = plasma_desc_general_create(PlasmaComplexFloat, nb, nb, A.m, A.n, 0, 0, A.m, A.n, &As); if (retval != PlasmaSuccess) { plasma_error("plasma_desc_general_create() failed"); plasma_desc_destroy(&A); plasma_desc_destroy(&B); plasma_desc_destroy(&X); plasma_desc_destroy(&R); return retval; } retval = plasma_desc_general_create(PlasmaComplexFloat, nb, nb, X.m, X.n, 0, 0, X.m, X.n, &Xs); if (retval != PlasmaSuccess) { plasma_error("plasma_desc_general_create() failed"); plasma_desc_destroy(&A); plasma_desc_destroy(&B); plasma_desc_destroy(&X); plasma_desc_destroy(&R); plasma_desc_destroy(&As); return retval; } // Allocate tiled workspace for Infinity norm calculations size_t lwork = imax((size_t)A.nt*A.n+A.n, (size_t)X.mt*X.n+(size_t)R.mt*R.n); double *work = (double*)malloc(((size_t)lwork)*sizeof(double)); double *Rnorm = (double*)malloc(((size_t)R.n)*sizeof(double)); double *Xnorm = (double*)malloc(((size_t)X.n)*sizeof(double)); // Create sequence plasma_sequence_t *sequence = NULL; retval = plasma_sequence_create(&sequence); if (retval != PlasmaSuccess) { plasma_error("plasma_sequence_create() failed"); return retval; } // Initialize request plasma_request_t request 
= PlasmaRequestInitializer;

    // Asynchronous block
    #pragma omp parallel
    #pragma omp master
    {
        // Translate matrices to tile layout.
        plasma_omp_zge2desc(pA, lda, A, sequence, &request);
        plasma_omp_zge2desc(pB, ldb, B, sequence, &request);

        // Call the tile async function.
        plasma_omp_zcposv(uplo, A, B, X, As, Xs, R,
                          work, Rnorm, Xnorm, iter, sequence, &request);

        // Translate back to LAPACK layout.
        plasma_omp_zdesc2ge(X, pX, ldx, sequence, &request);
    }
    // Implicit synchronization

    // Free matrices in tile layout.
    plasma_desc_destroy(&A);
    plasma_desc_destroy(&B);
    plasma_desc_destroy(&X);
    plasma_desc_destroy(&R);
    plasma_desc_destroy(&As);
    plasma_desc_destroy(&Xs);
    free(work);
    free(Rnorm);
    free(Xnorm);

    // Return status.
    int status = sequence->status;
    plasma_sequence_destroy(sequence);
    return status;
}

/***************************************************************************//**
 *
 * @ingroup plasma_posv
 *
 * Solves a Hermitian positive definite system using iterative refinement
 * with the Cholesky factor computed using plasma_cpotrf.
 * Non-blocking tile version of plasma_zcposv().
 * Operates on matrices stored by tiles.
 * All matrices are passed through descriptors.
 * All dimensions are taken from the descriptors.
 * Allows for pipelining of operations at runtime.
 *
 *******************************************************************************
 *
 * @param[in] uplo
 *          Specifies whether the matrix A is upper or lower triangular:
 *          - PlasmaUpper: Upper triangle of A is stored;
 *          - PlasmaLower: Lower triangle of A is stored.
 *
 * @param[in] A
 *          Descriptor of matrix A.
 *
 * @param[in] B
 *          Descriptor of matrix B.
 *
 * @param[in,out] X
 *          Descriptor of matrix X.
 *
 * @param[out] As
 *          Descriptor of auxiliary matrix A in single complex precision.
 *
 * @param[out] Xs
 *          Descriptor of auxiliary matrix X in single complex precision.
 *
 * @param[out] R
 *          Descriptor of the auxiliary residual matrix R.
 *
 * @param[out] work
 *          Workspace needed to compute the infinity norm of the matrix A.
 *
 * @param[out] Rnorm
 *          Workspace needed to store the max value in each of the residual
 *          vectors.
 *
 * @param[out] Xnorm
 *          Workspace needed to store the max value in each of the current
 *          solution vectors.
 *
 * @param[out] iter
 *          The number of iterations in the iterative refinement
 *          process, needed for the convergence. If it failed, it is set
 *          to -(1+itermax), where itermax = 30.
 *
 * @param[in] sequence
 *          Identifies the sequence of function calls that this call belongs to
 *          (for completion checks and exception handling purposes).
 *
 * @param[out] request
 *          Identifies this function call (for exception handling purposes).
 *
 * @retval void
 *          Errors are returned by setting sequence->status and
 *          request->status to error values. The sequence->status and
 *          request->status should never be set to PlasmaSuccess (the
 *          initial values) since another async call may be setting a
 *          failure value at the same time.
* ******************************************************************************* * * @sa plasma_zcposv * @sa plasma_omp_dsposv * @sa plasma_omp_zposv * ******************************************************************************/ void plasma_omp_zcposv(plasma_enum_t uplo, plasma_desc_t A, plasma_desc_t B, plasma_desc_t X, plasma_desc_t As, plasma_desc_t Xs, plasma_desc_t R, double *work, double *Rnorm, double *Xnorm, int *iter, plasma_sequence_t *sequence, plasma_request_t *request) { const int itermax = 30; const double bwdmax = 1.0; const plasma_complex64_t zmone = -1.0; const plasma_complex64_t zone = 1.0; *iter = 0; // Get PLASMA context plasma_context_t *plasma = plasma_context_self(); if (plasma == NULL) { plasma_error("PLASMA not initialized"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } // Check input arguments if (uplo != PlasmaUpper && uplo != PlasmaLower) { plasma_error("illegal value of uplo"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } if (plasma_desc_check(A) != PlasmaSuccess) { plasma_error("invalid A"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } if (plasma_desc_check(B) != PlasmaSuccess) { plasma_error("invalid B"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } if (plasma_desc_check(X) != PlasmaSuccess) { plasma_error("invalid X"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } if (plasma_desc_check(As) != PlasmaSuccess) { plasma_error("invalid As"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } if (plasma_desc_check(Xs) != PlasmaSuccess) { plasma_error("invalid Xs"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } if (plasma_desc_check(R) != PlasmaSuccess) { plasma_error("invalid R"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } if (sequence == NULL) { plasma_error("NULL sequence"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } if (request == NULL) { plasma_error("NULL request"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } // Quick return. if (A.n == 0 || B.n == 0) return; // Workspace for dzamax double *workX = work; double *workR = &work[X.mt*X.n]; // Compute some constants. double cte; double eps = LAPACKE_dlamch_work('E'); double Anorm; plasma_pzlanhe(PlasmaInfNorm, uplo, A, work, &Anorm, sequence, request); // Convert B from double to single precision, store result in Xs. plasma_pzlag2c(B, Xs, sequence, request); // Convert A from double to single precision, store result in As. // TODO: need zlat2c plasma_pzlag2c(A, As, sequence, request); // Compute the Cholesky factorization of As. plasma_pcpotrf(uplo, As, sequence, request); // Solve the system As * Xs = Bs. plasma_pctrsm(PlasmaLeft, uplo, uplo == PlasmaUpper ? PlasmaConjTrans : PlasmaNoTrans, PlasmaNonUnit, 1.0, As, Xs, sequence, request); plasma_pctrsm(PlasmaLeft, uplo, uplo == PlasmaUpper ? PlasmaNoTrans : PlasmaConjTrans, PlasmaNonUnit, 1.0, As, Xs, sequence, request); // Convert Xs to double precision. plasma_pclag2z(Xs, X, sequence, request); // Compute R = B - A * X. plasma_pzlacpy(PlasmaGeneral, PlasmaNoTrans, B, R, sequence, request); plasma_pzhemm(PlasmaLeft, uplo, zmone, A, X, zone, R, sequence, request); // Check whether the nrhs normwise backward error satisfies the // stopping criterion. If yes, set iter=0 and return. 
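    // The test, applied column by column, is
    //     Rnorm[j] < Xnorm[j] * Anorm * eps * sqrt(n) * bwdmax,
    // i.e. the normwise backward error bound documented above, with the
    // constant cte = Anorm * eps * sqrt(n) * bwdmax computed once below.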
plasma_pdzamax(PlasmaColumnwise, X, workX, Xnorm, sequence, request); plasma_pdzamax(PlasmaColumnwise, R, workR, Rnorm, sequence, request); #pragma omp taskwait { cte = Anorm * eps * sqrt((double)A.n) * bwdmax; int flag = 1; for (int n = 0; n < R.n && flag == 1; n++) { if (Rnorm[n] > Xnorm[n] * cte) { flag = 0; } } if (flag == 1) { *iter = 0; return; } } // Iterative refinement for (int iiter = 0; iiter < itermax; iiter++) { // Convert R from double to single precision, store result in Xs. plasma_pzlag2c(R, Xs, sequence, request); // Solve the system As * Xs = Rs. plasma_pctrsm(PlasmaLeft, uplo, uplo == PlasmaUpper ? PlasmaConjTrans : PlasmaNoTrans, PlasmaNonUnit, 1.0, As, Xs, sequence, request); plasma_pctrsm(PlasmaLeft, uplo, uplo == PlasmaUpper ? PlasmaNoTrans : PlasmaConjTrans, PlasmaNonUnit, 1.0, As, Xs, sequence, request); // Convert Xs back to double precision and update the current iterate. plasma_pclag2z(Xs, R, sequence, request); plasma_pzgeadd(PlasmaNoTrans, zone, R, zone, X, sequence, request); // Compute R = B - A * X plasma_pzlacpy(PlasmaGeneral, PlasmaNoTrans, B, R, sequence, request); plasma_pzhemm(PlasmaLeft, uplo, zmone, A, X, zone, R, sequence, request); // Check whether nrhs normwise backward error satisfies the // stopping criterion. If yes, set iter = iiter > 0 and return. plasma_pdzamax(PlasmaColumnwise, X, workX, Xnorm, sequence, request); plasma_pdzamax(PlasmaColumnwise, R, workR, Rnorm, sequence, request); #pragma omp taskwait { int flag = 1; for (int n = 0; n < R.n && flag == 1; n++) { if (Rnorm[n] > Xnorm[n] * cte) { flag = 0; } } if (flag == 1) { *iter = iiter+1; return; } } } // If we are at this place of the code, this is because we have performed // iter = itermax iterations and never satisfied the stopping criterion, // set up the iter flag accordingly and follow up with double precision routine. *iter = -itermax - 1; // Compute Cholesky factorization of A. plasma_pzpotrf(uplo, A, sequence, request); // Solve the system A * X = B. plasma_pzlacpy(PlasmaGeneral, PlasmaNoTrans, B, X, sequence, request); plasma_pztrsm(PlasmaLeft, uplo, uplo == PlasmaUpper ? PlasmaConjTrans : PlasmaNoTrans, PlasmaNonUnit, 1.0, A, X, sequence, request); plasma_pztrsm(PlasmaLeft, uplo, uplo == PlasmaUpper ? PlasmaNoTrans : PlasmaConjTrans, PlasmaNonUnit, 1.0, A, X, sequence, request); }
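/* A minimal calling sketch (an illustration, not part of PLASMA): solve a
   Hermitian positive definite system in mixed precision and inspect iter to
   see whether the refinement converged or the double precision fallback was
   taken. plasma_init()/plasma_finalize() are assumed to be the standard
   PLASMA runtime calls from plasma.h; fill_spd() is a hypothetical
   user-supplied routine that makes A Hermitian positive definite. Error
   handling is elided. */

#include "plasma.h"
#include <stdio.h>
#include <stdlib.h>

void fill_spd(plasma_complex64_t *A, int n); /* hypothetical, user supplied */

int main(void)
{
    int n = 1000, nrhs = 1, iter;
    plasma_complex64_t *A = malloc((size_t)n*n*sizeof(plasma_complex64_t));
    plasma_complex64_t *B = malloc((size_t)n*nrhs*sizeof(plasma_complex64_t));
    plasma_complex64_t *X = malloc((size_t)n*nrhs*sizeof(plasma_complex64_t));

    plasma_init();            // start the PLASMA (OpenMP-based) runtime
    fill_spd(A, n);           // make A Hermitian positive definite
    /* ... fill B with the right-hand sides ... */

    int info = plasma_zcposv(PlasmaLower, n, nrhs, A, n, B, n, X, n, &iter);
    if (info == PlasmaSuccess) {
        if (iter >= 0)
            printf("converged after %d refinement iterations\n", iter);
        else
            printf("fell back to double precision (iter = %d)\n", iter);
    }

    plasma_finalize();
    free(A); free(B); free(X);
    return info;
}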
fieldaligned.h
#pragma once #include <cmath> #include <cusp/transpose.h> #include <cusp/csr_matrix.h> #include "../backend/grid.h" #include "../blas.h" #include "../backend/interpolation.cuh" #include "../backend/functions.h" #include "../functors.h" #include "../nullstelle.h" #include "../runge_kutta.h" namespace dg{ /** * @brief With the Default field ds becomes a dz */ struct DefaultField { void operator()( const dg::HVec& y, dg::HVec& yp) { yp[0] = yp[1] = 0; yp[2] = 1.; } double error( const dg::HVec& x0, const dg::HVec& x1) { return sqrt( (x0[0]-x1[0])*(x0[0]-x1[0]) +(x0[1]-x1[1])*(x0[1]-x1[1])+(x0[2]-x1[2])*(x0[2]-x1[2])); } bool monitor( const dg::HVec& end){ return true;} double operator()( double x, double y) //1/B { return 1.; } double operator()( double x, double y, double z) { return 1.; } }; /** * @brief Default Limiter means there is a limiter everywhere */ struct DefaultLimiter { /** * @brief return 1 * * @param x x value * @param y y value * @return 1 */ double operator()(double x, double y) { return 1; } }; /** * @brief No Limiter */ struct NoLimiter { /** * @brief return 0 * * @param x x value * @param y y value * @return 0 */ double operator()(double x, double y) { return 0.; } }; /** * @brief Integrate a field line to find whether the result lies inside or outside of the box * * @tparam Field Must be usable in the integrateRK4 function * @tparam Grid must provide 2d boundaries x0(), x1(), y0(), and y1() */ template < class Field, class Grid> struct BoxIntegrator { /** * @brief Construct from a given Field and Grid and accuracy * * @param field field must overload operator() with dg::HVec for three entries * @param g The 2d or 3d grid * @param eps the accuracy of the runge kutta integrator */ BoxIntegrator( Field field, const Grid& g, double eps): field_(field), g_(g), coords_(3), coordsp_(3), eps_(eps) {} /** * @brief Set the starting coordinates for next field line integration * * @param coords the new coords (must have size = 3) */ void set_coords( const thrust::host_vector<double>& coords){ coords_ = coords;} /** * @brief Integrate from 0 to deltaPhi * * @param deltaPhi upper integration boundary * * @return 1 if point is inside the box, -1 else */ double operator()( double deltaPhi) { try{ dg::integrateRK4( field_, coords_, coordsp_, deltaPhi, eps_); } catch( dg::NotANumber& exception) { return -1;} if (!(coordsp_[0] >= g_.x0() && coordsp_[0] <= g_.x1())) { return -1; } if (!(coordsp_[1] >= g_.y0() && coordsp_[1] <= g_.y1())) { return -1; } return +1; } private: Field field_; Grid g_; thrust::host_vector<double> coords_, coordsp_; double eps_; }; /** * @brief Integrate one field line in a given box, Result is guaranteed to lie inside the box * * @tparam Field Must be usable in the integrateRK4 function * @tparam Grid must provide 2d boundaries x0(), x1(), y0(), and y1() * @param field The field to use * @param grid instance of the Grid class * @param coords0 The initial condition * @param coords1 The resulting points (write only) guaranteed to lie inside the grid * @param phi1 The angle (read/write) contains maximum phi on input and resulting phi on output * @param eps error * @param globalbcz boundary condition (DIR or NEU) */ template< class Field, class Grid> void boxintegrator( Field& field, const Grid& grid, const thrust::host_vector<double>& coords0, thrust::host_vector<double>& coords1, double& phi1, double eps, dg::bc globalbcz) { dg::integrateRK4( field, coords0, coords1, phi1, eps); //+ integration //First catch periodic domain grid.shift_topologic( coords0[0], 
coords0[1], coords1[0], coords1[1]);
    if ( !grid.contains( coords1[0], coords1[1])) //the point still lies outside the box
    {
#ifdef DG_DEBUG
        std::cerr << "point "<<coords1[0]<<" "<<coords1[1]<<" is somewhere else!\n";
#endif //DG_DEBUG
        if( globalbcz == dg::DIR)
        {
            BoxIntegrator<Field, Grid> boxy( field, grid, eps);
            boxy.set_coords( coords0); //take the old coordinates
            if( phi1 > 0)
            {
                double dPhiMin = 0, dPhiMax = phi1;
                dg::bisection1d( boxy, dPhiMin, dPhiMax, eps); //find the zero crossing
                phi1 = (dPhiMin+dPhiMax)/2.;
                dg::integrateRK4( field, coords0, coords1, dPhiMax, eps); //integrate past the zero crossing so that the value is clipped to the boundary below
            }
            else
            {
                double dPhiMin = phi1, dPhiMax = 0;
                dg::bisection1d( boxy, dPhiMin, dPhiMax, eps);
                phi1 = (dPhiMin+dPhiMax)/2.;
                dg::integrateRK4( field, coords0, coords1, dPhiMin, eps);
            }
            if (!(coords1[0] > grid.x0())) { coords1[0]=grid.x0();}
            if (!(coords1[0] < grid.x1())) { coords1[0]=grid.x1();}
            if (!(coords1[1] > grid.y0())) { coords1[1]=grid.y0();}
            if (!(coords1[1] < grid.y1())) { coords1[1]=grid.y1();}
        }
        else if (globalbcz == dg::NEU )
        {
            coords1[0] = coords0[0];
            coords1[1] = coords0[1];
        }
        else if (globalbcz == DIR_NEU ) std::cerr << "DIR_NEU NOT IMPLEMENTED "<<std::endl;
        else if (globalbcz == NEU_DIR ) std::cerr << "NEU_DIR NOT IMPLEMENTED "<<std::endl;
        else if (globalbcz == dg::PER ) std::cerr << "PER NOT IMPLEMENTED "<<std::endl;
    }
}
////////////////////////////////////FieldAlignedCLASS////////////////////////////////////////////
/**
 * @brief Class for the evaluation of a parallel derivative
 *
 * This class discretizes the operators \f$ \nabla_\parallel = \mathbf{b}\cdot \nabla = b_R\partial_R + b_Z\partial_Z + b_\phi\partial_\phi \f$, \f$\nabla_\parallel^\dagger\f$ and \f$\Delta_\parallel=\nabla_\parallel^\dagger\cdot\nabla_\parallel\f$ in cylindrical coordinates
 * @ingroup utilities
 * @tparam Geometry The grid class
 * @tparam Matrix The matrix class of the interpolation matrix
 * @tparam container The container class on which the interpolation matrix operates (does not need to be dg::HVec)
 */
template< class Geometry, class Matrix, class container >
struct FieldAligned
{
    /**
     * @brief Construct from a field and a grid
     *
     * @tparam Field The field lines to be integrated: has to provide void operator()( const std::vector<dg::HVec>&, std::vector<dg::HVec>&) where the first index is R, the second Z and the last s (the length of the field line)
     * @tparam Limiter Class that can be evaluated on a 2d grid, returns 1 if there is a limiter and 0 if there isn't. If a field line crosses the limiter in the plane \f$ \phi=0\f$ then the limiter boundary conditions apply.
     * @param field The field to integrate
     * @param grid The grid on which to operate
     * @param eps Desired accuracy of the Runge-Kutta integrator
     * @param limit Instance of the limiter class (default is a limiter everywhere; note that if bcz is periodic it doesn't matter whether there is a limiter or not)
     * @param globalbcz Choose NEU or DIR. Defines the parallel boundary condition on the bounding box
     * @param deltaPhi If <0 it is ignored; otherwise it may differ from hz() only if Nz() == 1
     * @note If there is a limiter, the boundary condition on the first/last plane is set by the bcz variable from the grid and can be changed by the set_boundaries function. If there is no limiter the boundary condition is periodic.
*/ template <class Field, class Limiter> FieldAligned(Field field, Geometry grid, double eps = 1e-4, Limiter limit = DefaultLimiter(), dg::bc globalbcz = dg::DIR, double deltaPhi = -1); /** * @brief Set boundary conditions in the limiter region * * if Dirichlet boundaries are used the left value is the left function value, if Neumann boundaries are used the left value is the left derivative value * @param bcz boundary condition * @param left left boundary value * @param right right boundary value */ void set_boundaries( dg::bc bcz, double left, double right) { bcz_ = bcz; const dg::Grid2d g2d( g_.x0(), g_.x1(), g_.y0(), g_.y1(), g_.n(), g_.Nx(), g_.Ny()); left_ = dg::evaluate( dg::CONSTANT(left), g2d); right_ = dg::evaluate( dg::CONSTANT(right),g2d); } /** * @brief Set boundary conditions in the limiter region * * if Dirichlet boundaries are used the left value is the left function value, if Neumann boundaries are used the left value is the left derivative value * @param bcz boundary condition * @param left left boundary value * @param right right boundary value */ void set_boundaries( dg::bc bcz, const container& left, const container& right) { bcz_ = bcz; left_ = left; right_ = right; } /** * @brief Set boundary conditions in the limiter region * * if Dirichlet boundaries are used the left value is the left function value, if Neumann boundaries are used the left value is the left derivative value * @param bcz boundary condition * @param global 3D vector containing boundary values * @param scal_left left scaling factor * @param scal_right right scaling factor */ void set_boundaries( dg::bc bcz, const container& global, double scal_left, double scal_right); /** * @brief Evaluate a 2d functor and transform to all planes along the fieldlines * * Evaluates the given functor on a 2d plane and then follows fieldlines to * get the values in the 3rd dimension. Uses the grid given in the constructor. * @tparam BinaryOp Binary Functor * @param f Functor to evaluate * @param plane The number of the plane to start * * @return Returns an instance of container */ template< class BinaryOp> container evaluate( BinaryOp f, unsigned plane=0) const; /** * @brief Evaluate a 2d functor and transform to all planes along the fieldlines * * Evaluates the given functor on a 2d plane and then follows fieldlines to * get the values in the 3rd dimension. Uses the grid given in the constructor. * The second functor is used to scale the values along the fieldlines. * The fieldlines are assumed to be periodic. * @tparam BinaryOp Binary Functor * @tparam UnaryOp Unary Functor * @param f Functor to evaluate in x-y * @param g Functor to evaluate in z * @param p0 The number of the plane to start * @param rounds The number of rounds to follow a fieldline * @note g is evaluated such that p0 corresponds to z=0, p0+1 corresponds to z=hz, p0-1 to z=-hz, ... 
 *
 * @return Returns an instance of container
 */
template< class BinaryOp, class UnaryOp>
container evaluate( BinaryOp f, UnaryOp g, unsigned p0, unsigned rounds) const;
/**
 * @brief Applies the interpolation to the next planes
 *
 * @param in input
 * @param out output, may not equal input
 */
void einsPlus( const container& in, container& out);
/**
 * @brief Applies the interpolation to the previous planes
 *
 * @param in input
 * @param out output, may not equal input
 */
void einsMinus( const container& in, container& out);
/**
 * @brief Applies the transposed interpolation to the previous plane
 *
 * @param in input
 * @param out output, may not equal input
 */
void einsPlusT( const container& in, container& out);
/**
 * @brief Applies the transposed interpolation to the next plane
 *
 * @param in input
 * @param out output, may not equal input
 */
void einsMinusT( const container& in, container& out);
/**
 * @brief hz is the distance between the plus and minus planes
 *
 * @return three-dimensional vector
 */
const container& hz()const {return hz_;}
/**
 * @brief hp is the distance between the plus and current planes
 *
 * @return three-dimensional vector
 */
const container& hp()const {return hp_;}
/**
 * @brief hm is the distance between the current and minus planes
 *
 * @return three-dimensional vector
 */
const container& hm()const {return hm_;}
/**
 * @brief Access the underlying grid
 *
 * @return the grid
 */
const Geometry& grid() const{return g_;}
private:
typedef cusp::array1d_view< typename container::iterator> View;
typedef cusp::array1d_view< typename container::const_iterator> cView;
Matrix plus, minus, plusT, minusT; //interpolation matrices
container hz_, hp_, hm_, ghostM, ghostP;
Geometry g_;
dg::bc bcz_;
container left_, right_;
container limiter_;
};
///@cond
////////////////////////////////////DEFINITIONS////////////////////////////////////////
template<class Geometry, class M, class container>
template <class Field, class Limiter>
FieldAligned<Geometry, M,container>::FieldAligned(Field field, Geometry grid, double eps, Limiter limit, dg::bc globalbcz, double deltaPhi):
    hz_( dg::evaluate( dg::zero, grid)), hp_( hz_), hm_( hz_), g_(grid), bcz_(grid.bcz())
{
    //Resize vector to 2D grid size
    typename Geometry::perpendicular_grid g2d = grid.perp_grid( );
    unsigned size = g2d.size();
    limiter_ = dg::evaluate( limit, g2d);
    right_ = left_ = dg::evaluate( zero, g2d);
    ghostM.resize( size); ghostP.resize( size);
    //Set starting points
    std::vector<thrust::host_vector<double> > y( 5, dg::evaluate( dg::cooX2d, g2d)); //x
    y[1] = dg::evaluate( dg::cooY2d, g2d); //y
    y[2] = dg::evaluate( dg::zero, g2d);
    y[3] = dg::pullback( dg::cooX2d, g2d); //R
    y[4] = dg::pullback( dg::cooY2d, g2d); //Z
    //integrate field lines for all points
    std::vector<thrust::host_vector<double> > yp( 3, dg::evaluate(dg::zero, g2d)), ym(yp);
    if( deltaPhi <=0) deltaPhi = grid.hz();
    else assert( grid.Nz() == 1 || grid.hz()==deltaPhi);
#ifdef _OPENMP
#pragma omp parallel for shared(field)
#endif //_OPENMP
    for( unsigned i=0; i<size; i++)
    {
        thrust::host_vector<double> coords(5), coordsP(5), coordsM(5);
        coords[0] = y[0][i], coords[1] = y[1][i], coords[2] = y[2][i], coords[3] = y[3][i], coords[4] = y[4][i]; //x,y,s,R,Z
        double phi1 = deltaPhi;
        boxintegrator( field, g2d, coords, coordsP, phi1, eps, globalbcz);
        phi1 = - deltaPhi;
        boxintegrator( field, g2d, coords, coordsM, phi1, eps, globalbcz);
        yp[0][i] = coordsP[0], yp[1][i] = coordsP[1], yp[2][i] = coordsP[2];
        ym[0][i] = coordsM[0], ym[1][i] = coordsM[1], ym[2][i] = coordsM[2];
    }
    //catch periodic boundary conditions
    plus
= dg::create::interpolation( yp[0], yp[1], g2d, globalbcz); minus = dg::create::interpolation( ym[0], ym[1], g2d, globalbcz); // // Transposed matrices work only for csr_matrix due to bad matrix form for ell_matrix and MPI_Matrix lacks of transpose function!!! cusp::transpose( plus, plusT); cusp::transpose( minus, minusT); // copy into h vectors for( unsigned i=0; i<grid.Nz(); i++) { thrust::copy( yp[2].begin(), yp[2].end(), hp_.begin() + i*g2d.size()); thrust::copy( ym[2].begin(), ym[2].end(), hm_.begin() + i*g2d.size()); } dg::blas1::scal( hm_, -1.); dg::blas1::axpby( 1., hp_, +1., hm_, hz_); // } template<class G, class M, class container> void FieldAligned<G,M,container>::set_boundaries( dg::bc bcz, const container& global, double scal_left, double scal_right) { unsigned size = g_.n()*g_.n()*g_.Nx()*g_.Ny(); cView left( global.cbegin(), global.cbegin() + size); cView right( global.cbegin()+(g_.Nz()-1)*size, global.cbegin() + g_.Nz()*size); View leftView( left_.begin(), left_.end()); View rightView( right_.begin(), right_.end()); cusp::copy( left, leftView); cusp::copy( right, rightView); dg::blas1::scal( left_, scal_left); dg::blas1::scal( right_, scal_right); bcz_ = bcz; } template< class G, class M, class container> template< class BinaryOp> container FieldAligned<G,M,container>::evaluate( BinaryOp binary, unsigned p0) const { return evaluate( binary, dg::CONSTANT(1), p0, 0); } template<class G, class M, class container> template< class BinaryOp, class UnaryOp> container FieldAligned<G,M,container>::evaluate( BinaryOp binary, UnaryOp unary, unsigned p0, unsigned rounds) const { //idea: simply apply I+/I- enough times on the init2d vector to get the result in each plane //unary function is always such that the p0 plane is at x=0 assert( g_.Nz() > 1); assert( p0 < g_.Nz()); const typename G::perpendicular_grid g2d = g_.perp_grid(); container init2d = dg::pullback( binary, g2d), temp(init2d), tempP(init2d), tempM(init2d); container vec3d = dg::evaluate( dg::zero, g_); std::vector<container> plus2d( g_.Nz(), (container)dg::evaluate(dg::zero, g2d) ), minus2d( plus2d), result( plus2d); unsigned turns = rounds; if( turns ==0) turns++; //first apply Interpolation many times, scale and store results for( unsigned r=0; r<turns; r++) for( unsigned i0=0; i0<g_.Nz(); i0++) { dg::blas1::copy( init2d, tempP); dg::blas1::copy( init2d, tempM); unsigned rep = i0 + r*g_.Nz(); for(unsigned k=0; k<rep; k++) { dg::blas2::symv( plus, tempP, temp); temp.swap( tempP); dg::blas2::symv( minus, tempM, temp); temp.swap( tempM); } dg::blas1::scal( tempP, unary( (double)rep*g_.hz() ) ); dg::blas1::scal( tempM, unary( -(double)rep*g_.hz() ) ); dg::blas1::axpby( 1., tempP, 1., plus2d[i0]); dg::blas1::axpby( 1., tempM, 1., minus2d[i0]); } //now we have the plus and the minus filaments if( rounds == 0) //there is a limiter { for( unsigned i0=0; i0<g_.Nz(); i0++) { int idx = (int)i0 - (int)p0; if(idx>=0) result[i0] = plus2d[idx]; else result[i0] = minus2d[abs(idx)]; thrust::copy( result[i0].begin(), result[i0].end(), vec3d.begin() + i0*g2d.size()); } } else //sum up plus2d and minus2d { for( unsigned i0=0; i0<g_.Nz(); i0++) { unsigned revi0 = (g_.Nz() - i0)%g_.Nz(); //reverted index dg::blas1::axpby( 1., plus2d[i0], 0., result[i0]); dg::blas1::axpby( 1., minus2d[revi0], 1., result[i0]); } dg::blas1::axpby( -1., init2d, 1., result[0]); for(unsigned i0=0; i0<g_.Nz(); i0++) { int idx = ((int)i0 -(int)p0 + g_.Nz())%g_.Nz(); //shift index thrust::copy( result[idx].begin(), result[idx].end(), vec3d.begin() + i0*g2d.size()); 
} } return vec3d; } template< class G, class M, class container> void FieldAligned<G,M, container>::einsPlus( const container& f, container& fpe) { unsigned size = g_.n()*g_.n()*g_.Nx()*g_.Ny(); View ghostPV( ghostP.begin(), ghostP.end()); View ghostMV( ghostM.begin(), ghostM.end()); cView rightV( right_.begin(), right_.end()); for( unsigned i0=0; i0<g_.Nz(); i0++) { unsigned ip = (i0==g_.Nz()-1) ? 0:i0+1; cView fp( f.cbegin() + ip*size, f.cbegin() + (ip+1)*size); cView f0( f.cbegin() + i0*size, f.cbegin() + (i0+1)*size); View fP( fpe.begin() + i0*size, fpe.begin() + (i0+1)*size); cusp::multiply( plus, fp, fP); //make ghostcells i.e. modify fpe in the limiter region if( i0==g_.Nz()-1 && bcz_ != dg::PER) { if( bcz_ == dg::DIR || bcz_ == dg::NEU_DIR) { cusp::blas::axpby( rightV, f0, ghostPV, 2., -1.); } if( bcz_ == dg::NEU || bcz_ == dg::DIR_NEU) { thrust::transform( right_.begin(), right_.end(), hp_.begin(), ghostM.begin(), thrust::multiplies<double>()); cusp::blas::axpby( ghostMV, f0, ghostPV, 1., 1.); } //interlay ghostcells with periodic cells: L*g + (1-L)*fpe cusp::blas::axpby( ghostPV, fP, ghostPV, 1., -1.); dg::blas1::pointwiseDot( limiter_, ghostP, ghostP); cusp::blas::axpby( ghostPV, fP, fP, 1.,1.); } } } template< class G,class M, class container> void FieldAligned<G,M, container>::einsMinus( const container& f, container& fme) { //note that thrust functions don't work on views unsigned size = g_.n()*g_.n()*g_.Nx()*g_.Ny(); View ghostPV( ghostP.begin(), ghostP.end()); View ghostMV( ghostM.begin(), ghostM.end()); cView leftV( left_.begin(), left_.end()); for( unsigned i0=0; i0<g_.Nz(); i0++) { unsigned im = (i0==0) ? g_.Nz()-1:i0-1; cView fm( f.cbegin() + im*size, f.cbegin() + (im+1)*size); cView f0( f.cbegin() + i0*size, f.cbegin() + (i0+1)*size); View fM( fme.begin() + i0*size, fme.begin() + (i0+1)*size); cusp::multiply( minus, fm, fM ); //make ghostcells if( i0==0 && bcz_ != dg::PER) { if( bcz_ == dg::DIR || bcz_ == dg::DIR_NEU) { cusp::blas::axpby( leftV, f0, ghostMV, 2., -1.); } if( bcz_ == dg::NEU || bcz_ == dg::NEU_DIR) { thrust::transform( left_.begin(), left_.end(), hm_.begin(), ghostP.begin(), thrust::multiplies<double>()); cusp::blas::axpby( ghostPV, f0, ghostMV, -1., 1.); } //interlay ghostcells with periodic cells: L*g + (1-L)*fme cusp::blas::axpby( ghostMV, fM, ghostMV, 1., -1.); dg::blas1::pointwiseDot( limiter_, ghostM, ghostM); cusp::blas::axpby( ghostMV, fM, fM, 1., 1.); } } } template< class G,class M, class container> void FieldAligned<G,M, container>::einsMinusT( const container& f, container& fpe) { unsigned size = g_.n()*g_.n()*g_.Nx()*g_.Ny(); View ghostPV( ghostP.begin(), ghostP.end()); View ghostMV( ghostM.begin(), ghostM.end()); cView rightV( right_.begin(), right_.end()); for( unsigned i0=0; i0<g_.Nz(); i0++) { unsigned ip = (i0==g_.Nz()-1) ? 0:i0+1; cView fp( f.cbegin() + ip*size, f.cbegin() + (ip+1)*size); cView f0( f.cbegin() + i0*size, f.cbegin() + (i0+1)*size); View fP( fpe.begin() + i0*size, fpe.begin() + (i0+1)*size); cusp::multiply( minusT, fp, fP ); //make ghostcells i.e. 
modify fpe in the limiter region if( i0==g_.Nz()-1 && bcz_ != dg::PER) { if( bcz_ == dg::DIR || bcz_ == dg::NEU_DIR) { cusp::blas::axpby( rightV, f0, ghostPV, 2., -1.); } if( bcz_ == dg::NEU || bcz_ == dg::DIR_NEU) { thrust::transform( right_.begin(), right_.end(), hp_.begin(), ghostM.begin(), thrust::multiplies<double>()); cusp::blas::axpby( ghostMV, f0, ghostPV, 1., 1.); } //interlay ghostcells with periodic cells: L*g + (1-L)*fpe cusp::blas::axpby( ghostPV, fP, ghostPV, 1., -1.); dg::blas1::pointwiseDot( limiter_, ghostP, ghostP); cusp::blas::axpby( ghostPV, fP, fP, 1.,1.); } } } template< class G,class M, class container> void FieldAligned<G,M, container>::einsPlusT( const container& f, container& fme) { //note that thrust functions don't work on views unsigned size = g_.n()*g_.n()*g_.Nx()*g_.Ny(); View ghostPV( ghostP.begin(), ghostP.end()); View ghostMV( ghostM.begin(), ghostM.end()); cView leftV( left_.begin(), left_.end()); for( unsigned i0=0; i0<g_.Nz(); i0++) { unsigned im = (i0==0) ? g_.Nz()-1:i0-1; cView fm( f.cbegin() + im*size, f.cbegin() + (im+1)*size); cView f0( f.cbegin() + i0*size, f.cbegin() + (i0+1)*size); View fM( fme.begin() + i0*size, fme.begin() + (i0+1)*size); cusp::multiply( plusT, fm, fM ); //make ghostcells if( i0==0 && bcz_ != dg::PER) { if( bcz_ == dg::DIR || bcz_ == dg::DIR_NEU) { cusp::blas::axpby( leftV, f0, ghostMV, 2., -1.); } if( bcz_ == dg::NEU || bcz_ == dg::NEU_DIR) { thrust::transform( left_.begin(), left_.end(), hm_.begin(), ghostP.begin(), thrust::multiplies<double>()); cusp::blas::axpby( ghostPV, f0, ghostMV, -1., 1.); } //interlay ghostcells with periodic cells: L*g + (1-L)*fme cusp::blas::axpby( ghostMV, fM, ghostMV, 1., -1.); dg::blas1::pointwiseDot( limiter_, ghostM, ghostM); cusp::blas::axpby( ghostMV, fM, fM, 1., 1.); } } } ///@endcond }//namespace dg
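/*
 * Illustrative sketch (not part of the dg library): the einsPlus/einsMinus
 * members above blend ghost-cell values into the periodically interpolated
 * values via the limiter mask, fpe <- L*ghost + (1-L)*fpe, spelled out there
 * as an axpby/pointwiseDot/axpby sequence. A plain-C analogue on raw arrays
 * could look like this; the function and array names are hypothetical.
 */
static void blend_ghost_cells(double* fpe, const double* ghost,
                              const double* limiter, unsigned size)
{
    for (unsigned i = 0; i < size; i++)
        /* where limiter==1 take the ghost value, where limiter==0 keep fpe */
        fpe[i] = limiter[i] * ghost[i] + (1.0 - limiter[i]) * fpe[i];
}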
threads1.c
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

void thread_function(unsigned int num_threads);

int contador_global = 0;

int main(int argc, char* argv[])
{
    /* omp_get_max_threads(), not omp_get_thread_num(), gives the default team size */
    unsigned int num_threads = omp_get_max_threads();
    if (argc >= 2) {
        num_threads = atoi(argv[1]);
    }
    printf("Executing with %u threads.\n", num_threads);
    #pragma omp parallel for
    for (unsigned int i = 0; i < num_threads; ++i) {
        thread_function(num_threads);
    }
    /* the parallel region has ended; the initial thread prints the total once */
    printf("%d\n", contador_global);
    return 0;
}

void thread_function(unsigned int num_threads)
{
    for (unsigned int i = 0; i < num_threads * 1000; ++i) {
        #pragma omp critical
        contador_global++;
    }
}
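/*
 * Minimal variant sketch (not part of the file above): for a plain shared
 * counter, "#pragma omp atomic" is usually cheaper than "#pragma omp critical"
 * because it maps to a hardware atomic increment instead of a lock. The names
 * below are hypothetical.
 */
#include <omp.h>

static int contador_atomic = 0;

static void count_atomic(unsigned int iterations)
{
    #pragma omp parallel for
    for (unsigned int i = 0; i < iterations; ++i) {
        #pragma omp atomic
        contador_atomic++;
    }
}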
GB_unop__cimag_fp32_fc32.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__cimag_fp32_fc32) // op(A') function: GB (_unop_tran__cimag_fp32_fc32) // C type: float // A type: GxB_FC32_t // cast: GxB_FC32_t cij = (aij) // unaryop: cij = cimagf (aij) #define GB_ATYPE \ GxB_FC32_t #define GB_CTYPE \ float // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ GxB_FC32_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = cimagf (x) ; // casting #define GB_CAST(z, aij) \ GxB_FC32_t z = (aij) ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GxB_FC32_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ GxB_FC32_t z = (aij) ; \ Cx [pC] = cimagf (z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_CIMAG || GxB_NO_FP32 || GxB_NO_FC32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__cimag_fp32_fc32) ( float *Cx, // Cx and Ax may be aliased const GxB_FC32_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GxB_FC32_t aij = Ax [p] ; GxB_FC32_t z = (aij) ; Cx [p] = cimagf (z) ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; GxB_FC32_t aij = Ax [p] ; GxB_FC32_t z = (aij) ; Cx [p] = cimagf (z) ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__cimag_fp32_fc32) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
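/*
 * Standalone sketch of the pattern generated above (assumed names, not part
 * of SuiteSparse:GraphBLAS): apply a unary operator, here cimagf, over a
 * dense array with a static OpenMP schedule, mirroring the Ab == NULL branch
 * of GB (_unop_apply__cimag_fp32_fc32).
 */
#include <complex.h>
#include <omp.h>

static void apply_cimagf(float *Cx, const float complex *Ax, long anz, int nthreads)
{
    long p;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0; p < anz; p++)
    {
        Cx[p] = cimagf(Ax[p]);   /* z = cimagf(x), one entry per iteration */
    }
}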
cg.20190406_format.c
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "globals.h"
#include "randdp.h"
#include "timers.h"
#include <omp.h>
//---------------------------------------------------------------------
#define CACHE_LINE_SIZE_PAD 128
#define INT_PAD_SIZE CACHE_LINE_SIZE_PAD/sizeof(int)
#define DOUBLE_PAD_SIZE CACHE_LINE_SIZE_PAD/sizeof(double)
/* common / main_int_mem / */
static int colidx[NZ];
static int rowstr[NA+1];
static int iv[NA];
static int arow[NA];
static int acol[NAZ];
/* common / main_flt_mem / */
static double aelt[NAZ];
static double a[NZ];
static double x[NA+2];
static double z[NA+2];
static double p[NA+2];
static double q[NA+2];
static double r[NA+2];
/* common / partit_size / */
static int naa;
static int nzz;
static int firstrow;
static int lastrow;
static int firstcol;
static int lastcol;
/* common /urando/ */
static double amult;
static double tran;
/* common /timers/ */
static logical timeron=true;
//---------------------------------------------------------------------
//---------------------------------------------------------------------
static void conj_grad(int colidx[], int rowstr[], double x[], double z[], double a[], double p[], double q[], double r[], double *rnorm);
static void makea(int n, int nz, double a[], int colidx[], int rowstr[], int firstrow, int lastrow, int firstcol, int lastcol, int arow[], int acol[][NONZER+1], double aelt[][NONZER+1], int iv[]);
static void sparse(double a[], int colidx[], int rowstr[], int n, int nz, int nozer, int arow[], int acol[][NONZER+1], double aelt[][NONZER+1], int firstrow, int lastrow, int nzloc[], double rcond, double shift);
static void sprnvc(int n, int nz, int nn1, double v[], int iv[]);
static int icnvrt(double x, int ipwr2);
static void vecset(int n, double v[], int iv[], int *nzv, int i, double val);
//---------------------------------------------------------------------
int main(int argc, char *argv[])
{
  omp_set_num_threads(omp_get_num_procs());
  int i, j, k, it;
  double zeta;
  double rnorm;
  double norm_temp1, norm_temp2;
  double t, mflops, tmax;
  //char Class;
  logical verified;
  double zeta_verify_value, epsilon, err;
  char *t_names[T_last];
  for (i = 0; i < T_last; i++) {
    timer_clear(i);
  }
  timer_start(T_init);
  firstrow = 0;
  lastrow  = NA-1;
  firstcol = 0;
  lastcol  = NA-1;
  zeta_verify_value = VALID_RESULT;
  printf("\nCG start...\n\n");
  printf(" Size: %11d\n", NA);
  printf(" Iterations: %5d\n", NITER);
  printf("\n");
  naa = NA;
  nzz = NZ;
  //---------------------------------------------------------------------
  // Initialize random number generator
  //---------------------------------------------------------------------
  tran = 314159265.0;
  amult = 1220703125.0;
  zeta = randlc(&tran, amult);
  //---------------------------------------------------------------------
  //
  //---------------------------------------------------------------------
  makea(naa, nzz, a, colidx, rowstr, firstrow, lastrow, firstcol, lastcol, arow, (int (*)[NONZER+1])(void*)acol, (double (*)[NONZER+1])(void*)aelt, iv);
  //---------------------------------------------------------------------
  // Note: as a result of the above call to makea:
  //   values of j used in indexing rowstr go from 0 --> lastrow-firstrow
  //   values of colidx which are col indexes go from firstcol --> lastcol
  // So:
  //   Shift the col index vals from actual (firstcol --> lastcol)
  //   to local, i.e., (0 --> lastcol-firstcol)
  //---------------------------------------------------------------------
  #pragma omp parallel default(shared) private(i,j,k)
  {
    #pragma omp for nowait
    for (j = 0; j < lastrow - 
firstrow + 1; j++) { for (k = rowstr[j]; k < rowstr[j+1]; k++) { colidx[k] = colidx[k] - firstcol; } } //--------------------------------------------------------------------- // set starting vector to (1, 1, .... 1) //--------------------------------------------------------------------- #pragma omp for nowait for (i = 0; i < NA+1; i++) { x[i] = 1.0; } #pragma omp for nowait for (j = 0; j < lastcol - firstcol + 1; j++) { q[j] = 0.0; z[j] = 0.0; r[j] = 0.0; p[j] = 0.0; } } zeta = 0.0; //--------------------------------------------------------------------- //----> // Do one iteration untimed to init all code and data page tables //----> (then reinit, start timing, to niter its) //--------------------------------------------------------------------- for (it = 1; it <= 1; it++) { //--------------------------------------------------------------------- // The call to the conjugate gradient routine: //--------------------------------------------------------------------- conj_grad(colidx, rowstr, x, z, a, p, q, r, &rnorm); //--------------------------------------------------------------------- // zeta = shift + 1/(x.z) // So, first: (x.z) // Also, find norm of z // So, first: (z.z) //--------------------------------------------------------------------- norm_temp1 = 0.0; norm_temp2 = 0.0; #pragma omp parallel for default(shared) private(j) reduction(+:norm_temp1,norm_temp2) for (j = 0; j < lastcol - firstcol + 1; j++) { norm_temp1 = norm_temp1 + x[j] * z[j]; norm_temp2 = norm_temp2 + z[j] * z[j]; } norm_temp2 = 1.0 / sqrt(norm_temp2); //--------------------------------------------------------------------- // Normalize z to obtain x //--------------------------------------------------------------------- #pragma omp parallel for default(shared) private(j) for (j = 0; j < lastcol - firstcol + 1; j++) { x[j] = norm_temp2 * z[j]; } } // end of do one iteration untimed //--------------------------------------------------------------------- // set starting vector to (1, 1, .... 
1)
  //---------------------------------------------------------------------
  #pragma omp parallel for default(shared) private(i)
  for (i = 0; i < NA+1; i++) {
    x[i] = 1.0;
  }
  zeta = 0.0;
  timer_stop(T_init);
  printf(" Initialization time = %15.3f seconds\n", timer_read(T_init));
  timer_start(T_bench);
  //---------------------------------------------------------------------
  //---->
  // Main Iteration for inverse power method
  //---->
  //---------------------------------------------------------------------
  for (it = 1; it <= NITER; it++) {
    //---------------------------------------------------------------------
    // The call to the conjugate gradient routine:
    //---------------------------------------------------------------------
    timer_clear(T_conj_grad);
    if (timeron) timer_start(T_conj_grad);
    conj_grad(colidx, rowstr, x, z, a, p, q, r, &rnorm);
    if (timeron) timer_stop(T_conj_grad);
    //---------------------------------------------------------------------
    // zeta = shift + 1/(x.z)
    // So, first: (x.z)
    // Also, find norm of z
    // So, first: (z.z)
    //---------------------------------------------------------------------
    norm_temp1 = 0.0;
    norm_temp2 = 0.0;
    #pragma omp parallel for default(shared) private(j) reduction(+:norm_temp1,norm_temp2)
    for (j = 0; j < lastcol - firstcol + 1; j++) {
      norm_temp1 = norm_temp1 + x[j]*z[j];
      norm_temp2 = norm_temp2 + z[j]*z[j];
    }
    norm_temp2 = 1.0 / sqrt(norm_temp2);
    zeta = SHIFT + 1.0 / norm_temp1;
    if (it == 1)
      printf("\n iteration ||r|| zeta conj_seconds\n");
    printf(" %5d %20.14E%20.13f %lf\n", it, rnorm, zeta, timer_read(T_conj_grad));
    //---------------------------------------------------------------------
    // Normalize z to obtain x
    //---------------------------------------------------------------------
    #pragma omp parallel for default(shared) private(j)
    for (j = 0; j < lastcol - firstcol + 1; j++) {
      x[j] = norm_temp2 * z[j];
    }
  } // end of main iter inv pow meth
  timer_stop(T_bench);
  //---------------------------------------------------------------------
  // End of timed section
  //---------------------------------------------------------------------
  t = timer_read(T_bench);
  printf("\nComplete...\n");
  epsilon = 1.0e-10;
  err = fabs(zeta - zeta_verify_value) / zeta_verify_value;
  if (err <= epsilon) {
    verified = true;
    printf(" VERIFICATION SUCCESSFUL\n");
    printf(" Zeta is %20.13E\n", zeta);
    printf(" Error is %20.13E\n", err);
  } else {
    verified = false;
    printf(" VERIFICATION FAILED\n");
    printf(" Zeta %20.13E\n", zeta);
    printf(" The correct zeta is %20.13E\n", zeta_verify_value);
  }
  printf("\n\nExecution time : %lf seconds\n\n", t);
  return 0;
}
//---------------------------------------------------------------------
// Floating point arrays here are named as in spec discussion of
// CG algorithm
//---------------------------------------------------------------------
static void conj_grad(int colidx[], int rowstr[], double x[], double z[], double a[], double p[], double q[], double r[], double *rnorm)
{
  int j, k;
  int cgit, cgitmax = 25;
  double d, sum, rho, rho0, alpha, beta;
  rho = 0.0;
  //---------------------------------------------------------------------
  // Initialize the CG algorithm:
  //---------------------------------------------------------------------
  #pragma omp parallel default(shared) private(j)
  {
    #pragma omp for
    for (j = 0; j < naa+1; j++) {
      q[j] = 0.0;
      z[j] = 0.0;
      r[j] = x[j];
      p[j] = r[j];
    }
    //---------------------------------------------------------------------
    // rho = r.r
    // Now, obtain the norm of r: First, sum squares of r elements locally...
//--------------------------------------------------------------------- #pragma omp for reduction(+:rho) for (j = 0; j < lastcol - firstcol + 1; j++) { rho = rho + r[j]*r[j]; } } //--------------------------------------------------------------------- //----> // The conj grad iteration loop //----> //--------------------------------------------------------------------- for (cgit = 1; cgit <= cgitmax; cgit++) { //--------------------------------------------------------------------- // q = A.p // The partition submatrix-vector multiply: use workspace w //--------------------------------------------------------------------- // // NOTE: this version of the multiply is actually (slightly: maybe %5) // faster on the sp2 on 16 nodes than is the unrolled-by-2 version // below. On the Cray t3d, the reverse is true, i.e., the // unrolled-by-two version is some 10% faster. // The unrolled-by-8 version below is significantly faster // on the Cray t3d - overall speed of code is 1.5 times faster. #pragma omp parallel default(shared) private(j, k, sum) { #pragma omp for for (j = 0; j < lastrow - firstrow + 1; j++) { sum = 0.0; for (k = rowstr[j]; k < rowstr[j+1]; k++) { sum = sum + a[k]*p[colidx[k]]; } q[j] = sum; } } //--------------------------------------------------------------------- // Obtain p.q //--------------------------------------------------------------------- d = 0.0; #pragma omp parallel default(shared) private(j) { #pragma omp for reduction(+:d) for (j = 0; j < lastcol - firstcol + 1; j++) { d = d + p[j]*q[j]; } } //--------------------------------------------------------------------- // Obtain alpha = rho / (p.q) //--------------------------------------------------------------------- alpha = rho / d; //--------------------------------------------------------------------- // Save a temporary of rho //--------------------------------------------------------------------- rho0 = rho; //--------------------------------------------------------------------- // Obtain z = z + alpha*p // and r = r - alpha*q //--------------------------------------------------------------------- rho = 0.0; #pragma omp parallel default(shared) private(j) { #pragma omp for for (j = 0; j < lastcol - firstcol + 1; j++) { z[j] = z[j] + alpha*p[j]; r[j] = r[j] - alpha*q[j]; } } //--------------------------------------------------------------------- // rho = r.r // Now, obtain the norm of r: First, sum squares of r elements locally... 
//---------------------------------------------------------------------
  #pragma omp parallel default(shared) private(j)
  {
    #pragma omp for reduction(+:rho)
    for (j = 0; j < lastcol - firstcol + 1; j++) {
      rho = rho + r[j]*r[j];
    }
  }
  //---------------------------------------------------------------------
  // Obtain beta:
  //---------------------------------------------------------------------
  beta = rho / rho0;
  //---------------------------------------------------------------------
  // p = r + beta*p
  //---------------------------------------------------------------------
  #pragma omp parallel default(shared) private(j)
  {
    #pragma omp for
    for (j = 0; j < lastcol - firstcol + 1; j++) {
      p[j] = r[j] + beta*p[j];
    }
  }
  } // end of do cgit=1,cgitmax
  //---------------------------------------------------------------------
  // Compute residual norm explicitly: ||r|| = ||x - A.z||
  // First, form A.z
  // The partition submatrix-vector multiply
  //---------------------------------------------------------------------
  /*
  for (j = 0; j < lastrow - firstrow + 1; j++) {
    printf("j = %d, colidx[%d] = %d, z[%d] = %lf\n", j, j, colidx[j], colidx[j], z[colidx[j]][0]);
  }
  */
  double d_tmp;
  sum = 0.0;
  // note: k must be private here; a shared k would race on the inner CSR loop
  #pragma omp parallel default(shared) private(j, k, d, d_tmp)
  {
    #pragma omp for
    for (j = 0; j < lastrow - firstrow + 1; j++) {
      d = 0.0;
      for (k = rowstr[j]; k < rowstr[j+1]; k++) {
        d = d + a[k]*z[colidx[k]];
      }
      r[j] = d;
    }
    //---------------------------------------------------------------------
    // At this point, r contains A.z
    //---------------------------------------------------------------------
    #pragma omp for reduction(+:sum)
    for (j = 0; j < lastcol-firstcol+1; j++) {
      d_tmp = x[j] - r[j];
      sum = sum + d_tmp*d_tmp;
    }
  }
  *rnorm = sqrt(sum);
}
//---------------------------------------------------------------------
// generate the test problem for benchmark 6
// makea generates a sparse matrix with a
// prescribed sparsity distribution
//
// parameter      type      usage
//
// input
//
// n              i         number of cols/rows of matrix
// nz             i         nonzeros as declared array size
// rcond          r*8       condition number
// shift          r*8       main diagonal shift
//
// output
//
// a              r*8       array for nonzeros
// colidx         i         col indices
// rowstr         i         row pointers
//
// workspace
//
// iv, arow, acol i
// aelt           r*8
//---------------------------------------------------------------------
static void makea(int n, int nz, double a[], int colidx[], int rowstr[], int firstrow, int lastrow, int firstcol, int lastcol, int arow[], int acol[][NONZER+1], double aelt[][NONZER+1], int iv[])
{
  int iouter, ivelt, nzv, nn1;
  int ivc[NONZER+1];
  double vc[NONZER+1];
  //---------------------------------------------------------------------
  // nonzer is approximately (int(sqrt(nnza /n)));
  //---------------------------------------------------------------------
  //---------------------------------------------------------------------
  // nn1 is the smallest power of two not less than n
  //---------------------------------------------------------------------
  nn1 = 1;
  do {
    nn1 = 2 * nn1;
  } while (nn1 < n);
  //---------------------------------------------------------------------
  // Generate nonzero positions and save for the use in sparse.
//--------------------------------------------------------------------- for (iouter = 0; iouter < n; iouter++) { nzv = NONZER; sprnvc(n, nzv, nn1, vc, ivc); vecset(n, vc, ivc, &nzv, iouter+1, 0.5); arow[iouter] = nzv; for (ivelt = 0; ivelt < nzv; ivelt++) { acol[iouter][ivelt] = ivc[ivelt] - 1; aelt[iouter][ivelt] = vc[ivelt]; } } //--------------------------------------------------------------------- // ... make the sparse matrix from list of elements with duplicates // (iv is used as workspace) //--------------------------------------------------------------------- sparse(a, colidx, rowstr, n, nz, NONZER, arow, acol, aelt, firstrow, lastrow, iv, RCOND, SHIFT); } //--------------------------------------------------------------------- // rows range from firstrow to lastrow // the rowstr pointers are defined for nrows = lastrow-firstrow+1 values //--------------------------------------------------------------------- static void sparse(double a[], int colidx[], int rowstr[], int n, int nz, int nozer, int arow[], int acol[][NONZER+1], double aelt[][NONZER+1], int firstrow, int lastrow, int nzloc[], double rcond, double shift) { int nrows; //--------------------------------------------------- // generate a sparse matrix from a list of // [col, row, element] tri //--------------------------------------------------- int i, j, j1, j2, nza, k, kk, nzrow, jcol; double size, scale, ratio, va; logical cont40; //--------------------------------------------------------------------- // how many rows of result //--------------------------------------------------------------------- nrows = lastrow - firstrow + 1; //--------------------------------------------------------------------- // ...count the number of triples in each row //--------------------------------------------------------------------- for (j = 0; j < nrows+1; j++) { rowstr[j] = 0; } for (i = 0; i < n; i++) { for (nza = 0; nza < arow[i]; nza++) { j = acol[i][nza] + 1; rowstr[j] = rowstr[j] + arow[i]; } } rowstr[0] = 0; for (j = 1; j < nrows+1; j++) { rowstr[j] = rowstr[j] + rowstr[j-1]; } nza = rowstr[nrows] - 1; //--------------------------------------------------------------------- // ... rowstr(j) now is the location of the first nonzero // of row j of a //--------------------------------------------------------------------- if (nza > nz) { printf("Space for matrix elements exceeded in sparse\n"); printf("nza, nzmax = %d, %d\n", nza, nz); exit(EXIT_FAILURE); } //--------------------------------------------------------------------- // ... preload data pages //--------------------------------------------------------------------- for (j = 0; j < nrows; j++) { for (k = rowstr[j]; k < rowstr[j+1]; k++) { a[k] = 0.0; colidx[k] = -1; } nzloc[j] = 0; } //--------------------------------------------------------------------- // ... generate actual values by summing duplicates //--------------------------------------------------------------------- size = 1.0; ratio = pow(rcond, (1.0 / (double)(n))); for (i = 0; i < n; i++) { for (nza = 0; nza < arow[i]; nza++) { j = acol[i][nza]; scale = size * aelt[i][nza]; for (nzrow = 0; nzrow < arow[i]; nzrow++) { jcol = acol[i][nzrow]; va = aelt[i][nzrow] * scale; //-------------------------------------------------------------------- // ... 
add the identity * rcond to the generated matrix to bound // the smallest eigenvalue from below by rcond //-------------------------------------------------------------------- if (jcol == j && j == i) { va = va + rcond - shift; } cont40 = false; for (k = rowstr[j]; k < rowstr[j+1]; k++) { if (colidx[k] > jcol) { //---------------------------------------------------------------- // ... insert colidx here orderly //---------------------------------------------------------------- for (kk = rowstr[j+1]-2; kk >= k; kk--) { if (colidx[kk] > -1) { a[kk+1] = a[kk]; colidx[kk+1] = colidx[kk]; } } colidx[k] = jcol; a[k] = 0.0; cont40 = true; break; } else if (colidx[k] == -1) { colidx[k] = jcol; cont40 = true; break; } else if (colidx[k] == jcol) { //-------------------------------------------------------------- // ... mark the duplicated entry //-------------------------------------------------------------- nzloc[j] = nzloc[j] + 1; cont40 = true; break; } } if (cont40 == false) { printf("internal error in sparse: i=%d\n", i); exit(EXIT_FAILURE); } a[k] = a[k] + va; } } size = size * ratio; } //--------------------------------------------------------------------- // ... remove empty entries and generate final results //--------------------------------------------------------------------- for (j = 1; j < nrows; j++) { nzloc[j] = nzloc[j] + nzloc[j-1]; } for (j = 0; j < nrows; j++) { if (j > 0) { j1 = rowstr[j] - nzloc[j-1]; } else { j1 = 0; } j2 = rowstr[j+1] - nzloc[j]; nza = rowstr[j]; for (k = j1; k < j2; k++) { a[k] = a[nza]; colidx[k] = colidx[nza]; nza = nza + 1; } } for (j = 1; j < nrows+1; j++) { rowstr[j] = rowstr[j] - nzloc[j-1]; } nza = rowstr[nrows] - 1; } //--------------------------------------------------------------------- // generate a sparse n-vector (v, iv) // having nzv nonzeros // // mark(i) is set to 1 if position i is nonzero. // mark is all zero on entry and is reset to all zero before exit // this corrects a performance bug found by John G. Lewis, caused by // reinitialization of mark on every one of the n calls to sprnvc //--------------------------------------------------------------------- static void sprnvc(int n, int nz, int nn1, double v[], int iv[]) { int nzv, ii, i; double vecelt, vecloc; nzv = 0; while (nzv < nz) { vecelt = randlc(&tran, amult); //--------------------------------------------------------------------- // generate an integer between 1 and n in a portable manner //--------------------------------------------------------------------- vecloc = randlc(&tran, amult); i = icnvrt(vecloc, nn1) + 1; if (i > n) continue; //--------------------------------------------------------------------- // was this integer generated already? 
//--------------------------------------------------------------------- logical was_gen = false; for (ii = 0; ii < nzv; ii++) { if (iv[ii] == i) { was_gen = true; break; } } if (was_gen) continue; v[nzv] = vecelt; iv[nzv] = i; nzv = nzv + 1; } } //--------------------------------------------------------------------- // scale a double precision number x in (0,1) by a power of 2 and chop it //--------------------------------------------------------------------- static int icnvrt(double x, int ipwr2) { return (int)(ipwr2 * x); } //--------------------------------------------------------------------- // set ith element of sparse vector (v, iv) with // nzv nonzeros to val //--------------------------------------------------------------------- static void vecset(int n, double v[], int iv[], int *nzv, int i, double val) { int k; logical set; set = false; for (k = 0; k < *nzv; k++) { if (iv[k] == i) { v[k] = val; set = true; } } if (set == false) { v[*nzv] = val; iv[*nzv] = i; *nzv = *nzv + 1; } }
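/*
 * Minimal sketch (not part of the benchmark above) of the two OpenMP kernels
 * conj_grad spends its time in: a CSR matrix-vector product and a reduction
 * dot product. Every index used inside the parallel region, including the
 * inner index k, must be private to avoid the data race fixed in the residual
 * computation above. Function names here are hypothetical.
 */
#include <omp.h>

static void csr_spmv(int nrows, const int rowstr[], const int colidx[],
                     const double a[], const double p[], double q[])
{
  int j, k;
  double sum;
  #pragma omp parallel for default(shared) private(j, k, sum)
  for (j = 0; j < nrows; j++) {
    sum = 0.0;
    for (k = rowstr[j]; k < rowstr[j+1]; k++)
      sum = sum + a[k] * p[colidx[k]];   /* gather along the row's nonzeros */
    q[j] = sum;
  }
}

static double dot(int n, const double x[], const double y[])
{
  int j;
  double d = 0.0;
  #pragma omp parallel for reduction(+:d)
  for (j = 0; j < n; j++)
    d = d + x[j] * y[j];
  return d;
}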
9541.c
// this source is derived from CHILL AST originally from file '/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/polybench/polybench-code/stencils/fdtd-2d/kernel.c' as parsed by frontend compiler rose void kernel_fdtd_2d(int tmax, int nx, int ny, double ex[1000 + 0][1200 + 0], double ey[1000 + 0][1200 + 0], double hz[1000 + 0][1200 + 0], double _fict_[500 + 0]) { int t10; int t8; int t6; int t4; int t2; for (t2 = 0; t2 <= tmax - 1; t2 += 1) { for (t4 = 0; t4 <= ny - 1; t4 += 1) ey[0][t4] = _fict_[t2]; for (t4 = 1; t4 <= nx - 1; t4 += 1) for (t6 = 0; t6 <= ny - 1; t6 += 1) ey[t4][t6] = ey[t4][t6] - 0.5 * (hz[t4][t6] - hz[t4 - 1][t6]); #pragma omp parallel for private(t4,t6,t8,t10) for (t4 = 0; t4 <= nx - 1; t4 += 16) for (t6 = t4; t6 <= (t4 + 15 < nx - 1 ? t4 + 15 : nx - 1); t6 += 1) for (t8 = 1; t8 <= ny - 1; t8 += 64) for (t10 = t8; t10 <= (ny - 1 < t8 + 63 ? ny - 1 : t8 + 63); t10 += 1) ex[t6][t10] = ex[t6][t10] - 0.5 * (hz[t6][t10] - hz[t6][t10 - 1]); #pragma omp parallel for private(t4,t6,t8,t10) for (t4 = 0; t4 <= nx - 2; t4 += 16) for (t6 = t4; t6 <= (t4 + 15 < nx - 2 ? t4 + 15 : nx - 2); t6 += 1) for (t8 = 0; t8 <= ny - 2; t8 += 64) for (t10 = t8; t10 <= (ny - 2 < t8 + 63 ? ny - 2 : t8 + 63); t10 += 1) hz[t6][t10] = hz[t6][t10] - 0.69999999999999996 * (ex[t6][t10 + 1] - ex[t6][t10] + ey[t6 + 1][t10] - ey[t6][t10]); } }
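/*
 * Sketch of the transformation CHiLL applied above (hypothetical helper, not
 * generated code): the t4/t6 and t8/t10 loop pairs are a 16x64 rectangular
 * tiling of the original i/j loops, with the outer tile loop parallelized.
 * The min() in each inner bound clips the last partial tile.
 */
void tiled_update(int nx, int ny, double a[nx][ny])
{
  int ti, i, tj, j;
  #pragma omp parallel for private(i, tj, j)
  for (ti = 0; ti < nx; ti += 16)                              /* row tiles */
    for (i = ti; i <= (ti + 15 < nx - 1 ? ti + 15 : nx - 1); i++)
      for (tj = 0; tj < ny; tj += 64)                          /* column tiles */
        for (j = tj; j <= (tj + 63 < ny - 1 ? tj + 63 : ny - 1); j++)
          a[i][j] *= 0.5;                                      /* stand-in for the stencil body */
}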
middle6r.c
/* * Date: 11 December 2015 * Contact: Thomas Peyrin - thomas.peyrin@gmail.com */ /* * Simulation of boomerang analysis for Skinny * Date: March 21, 2020 * Author: Hosein Hadipour * Contact: hsn.hadipour@gmail.com */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <time.h> #include <math.h> #include <omp.h> #include <stdbool.h> // #define DEBUG 1 #define Nthreads 12 // Table that encodes the parameters of the various Skinny versions: // (block size, key size, number of rounds) //Skinny-64-64: 32 rounds //Skinny-64-128: 36 rounds //Skinny-64-192: 40 rounds //Skinny-128-128: 40 rounds //Skinny-128-256: 48 rounds //Skinny-128-384: 56 rounds int versions[6][3] = {{64, 64, 32}, {64, 128, 36}, {64, 192, 40}, {128, 128, 40}, {128, 256, 48}, {128, 384, 56}}; // Packing of data is done as follows (state[i][j] stands for row i and column j): // 0 1 2 3 // 4 5 6 7 // 8 9 10 11 //12 13 14 15 // 4-bit Sbox const unsigned char sbox_4[16] = {12, 6, 9, 0, 1, 10, 2, 11, 3, 8, 5, 13, 4, 14, 7, 15}; const unsigned char sbox_4_inv[16] = {3, 4, 6, 8, 12, 10, 1, 14, 9, 2, 5, 7, 0, 11, 13, 15}; // 8-bit Sbox const unsigned char sbox_8[256] = {0x65, 0x4c, 0x6a, 0x42, 0x4b, 0x63, 0x43, 0x6b, 0x55, 0x75, 0x5a, 0x7a, 0x53, 0x73, 0x5b, 0x7b, 0x35, 0x8c, 0x3a, 0x81, 0x89, 0x33, 0x80, 0x3b, 0x95, 0x25, 0x98, 0x2a, 0x90, 0x23, 0x99, 0x2b, 0xe5, 0xcc, 0xe8, 0xc1, 0xc9, 0xe0, 0xc0, 0xe9, 0xd5, 0xf5, 0xd8, 0xf8, 0xd0, 0xf0, 0xd9, 0xf9, 0xa5, 0x1c, 0xa8, 0x12, 0x1b, 0xa0, 0x13, 0xa9, 0x05, 0xb5, 0x0a, 0xb8, 0x03, 0xb0, 0x0b, 0xb9, 0x32, 0x88, 0x3c, 0x85, 0x8d, 0x34, 0x84, 0x3d, 0x91, 0x22, 0x9c, 0x2c, 0x94, 0x24, 0x9d, 0x2d, 0x62, 0x4a, 0x6c, 0x45, 0x4d, 0x64, 0x44, 0x6d, 0x52, 0x72, 0x5c, 0x7c, 0x54, 0x74, 0x5d, 0x7d, 0xa1, 0x1a, 0xac, 0x15, 0x1d, 0xa4, 0x14, 0xad, 0x02, 0xb1, 0x0c, 0xbc, 0x04, 0xb4, 0x0d, 0xbd, 0xe1, 0xc8, 0xec, 0xc5, 0xcd, 0xe4, 0xc4, 0xed, 0xd1, 0xf1, 0xdc, 0xfc, 0xd4, 0xf4, 0xdd, 0xfd, 0x36, 0x8e, 0x38, 0x82, 0x8b, 0x30, 0x83, 0x39, 0x96, 0x26, 0x9a, 0x28, 0x93, 0x20, 0x9b, 0x29, 0x66, 0x4e, 0x68, 0x41, 0x49, 0x60, 0x40, 0x69, 0x56, 0x76, 0x58, 0x78, 0x50, 0x70, 0x59, 0x79, 0xa6, 0x1e, 0xaa, 0x11, 0x19, 0xa3, 0x10, 0xab, 0x06, 0xb6, 0x08, 0xba, 0x00, 0xb3, 0x09, 0xbb, 0xe6, 0xce, 0xea, 0xc2, 0xcb, 0xe3, 0xc3, 0xeb, 0xd6, 0xf6, 0xda, 0xfa, 0xd3, 0xf3, 0xdb, 0xfb, 0x31, 0x8a, 0x3e, 0x86, 0x8f, 0x37, 0x87, 0x3f, 0x92, 0x21, 0x9e, 0x2e, 0x97, 0x27, 0x9f, 0x2f, 0x61, 0x48, 0x6e, 0x46, 0x4f, 0x67, 0x47, 0x6f, 0x51, 0x71, 0x5e, 0x7e, 0x57, 0x77, 0x5f, 0x7f, 0xa2, 0x18, 0xae, 0x16, 0x1f, 0xa7, 0x17, 0xaf, 0x01, 0xb2, 0x0e, 0xbe, 0x07, 0xb7, 0x0f, 0xbf, 0xe2, 0xca, 0xee, 0xc6, 0xcf, 0xe7, 0xc7, 0xef, 0xd2, 0xf2, 0xde, 0xfe, 0xd7, 0xf7, 0xdf, 0xff}; const unsigned char sbox_8_inv[256] = {0xac, 0xe8, 0x68, 0x3c, 0x6c, 0x38, 0xa8, 0xec, 0xaa, 0xae, 0x3a, 0x3e, 0x6a, 0x6e, 0xea, 0xee, 0xa6, 0xa3, 0x33, 0x36, 0x66, 0x63, 0xe3, 0xe6, 0xe1, 0xa4, 0x61, 0x34, 0x31, 0x64, 0xa1, 0xe4, 0x8d, 0xc9, 0x49, 0x1d, 0x4d, 0x19, 0x89, 0xcd, 0x8b, 0x8f, 0x1b, 0x1f, 0x4b, 0x4f, 0xcb, 0xcf, 0x85, 0xc0, 0x40, 0x15, 0x45, 0x10, 0x80, 0xc5, 0x82, 0x87, 0x12, 0x17, 0x42, 0x47, 0xc2, 0xc7, 0x96, 0x93, 0x03, 0x06, 0x56, 0x53, 0xd3, 0xd6, 0xd1, 0x94, 0x51, 0x04, 0x01, 0x54, 0x91, 0xd4, 0x9c, 0xd8, 0x58, 0x0c, 0x5c, 0x08, 0x98, 0xdc, 0x9a, 0x9e, 0x0a, 0x0e, 0x5a, 0x5e, 0xda, 0xde, 0x95, 0xd0, 0x50, 0x05, 0x55, 0x00, 0x90, 0xd5, 0x92, 0x97, 0x02, 0x07, 0x52, 0x57, 0xd2, 0xd7, 0x9d, 0xd9, 0x59, 0x0d, 0x5d, 0x09, 0x99, 0xdd, 0x9b, 0x9f, 0x0b, 0x0f, 0x5b, 0x5f, 0xdb, 0xdf, 0x16, 0x13, 0x83, 0x86, 0x46, 0x43, 0xc3, 0xc6, 
0x41, 0x14, 0xc1, 0x84, 0x11, 0x44, 0x81, 0xc4, 0x1c, 0x48, 0xc8, 0x8c, 0x4c, 0x18, 0x88, 0xcc, 0x1a, 0x1e, 0x8a, 0x8e, 0x4a, 0x4e, 0xca, 0xce, 0x35, 0x60, 0xe0, 0xa5, 0x65, 0x30, 0xa0, 0xe5, 0x32, 0x37, 0xa2, 0xa7, 0x62, 0x67, 0xe2, 0xe7, 0x3d, 0x69, 0xe9, 0xad, 0x6d, 0x39, 0xa9, 0xed, 0x3b, 0x3f, 0xab, 0xaf, 0x6b, 0x6f, 0xeb, 0xef, 0x26, 0x23, 0xb3, 0xb6, 0x76, 0x73, 0xf3, 0xf6, 0x71, 0x24, 0xf1, 0xb4, 0x21, 0x74, 0xb1, 0xf4, 0x2c, 0x78, 0xf8, 0xbc, 0x7c, 0x28, 0xb8, 0xfc, 0x2a, 0x2e, 0xba, 0xbe, 0x7a, 0x7e, 0xfa, 0xfe, 0x25, 0x70, 0xf0, 0xb5, 0x75, 0x20, 0xb0, 0xf5, 0x22, 0x27, 0xb2, 0xb7, 0x72, 0x77, 0xf2, 0xf7, 0x2d, 0x79, 0xf9, 0xbd, 0x7d, 0x29, 0xb9, 0xfd, 0x2b, 0x2f, 0xbb, 0xbf, 0x7b, 0x7f, 0xfb, 0xff}; // ShiftAndSwitchRows permutation const unsigned char P[16] = {0, 1, 2, 3, 7, 4, 5, 6, 10, 11, 8, 9, 13, 14, 15, 12}; const unsigned char P_inv[16] = {0, 1, 2, 3, 5, 6, 7, 4, 10, 11, 8, 9, 15, 12, 13, 14}; // Tweakey permutation const unsigned char TWEAKEY_P[16] = {9, 15, 8, 13, 10, 14, 12, 11, 0, 1, 2, 3, 4, 5, 6, 7}; const unsigned char TWEAKEY_P_inv[16] = {8, 9, 10, 11, 12, 13, 14, 15, 2, 0, 4, 7, 6, 3, 5, 1}; // round constants const unsigned char RC[62] = { 0x01, 0x03, 0x07, 0x0F, 0x1F, 0x3E, 0x3D, 0x3B, 0x37, 0x2F, 0x1E, 0x3C, 0x39, 0x33, 0x27, 0x0E, 0x1D, 0x3A, 0x35, 0x2B, 0x16, 0x2C, 0x18, 0x30, 0x21, 0x02, 0x05, 0x0B, 0x17, 0x2E, 0x1C, 0x38, 0x31, 0x23, 0x06, 0x0D, 0x1B, 0x36, 0x2D, 0x1A, 0x34, 0x29, 0x12, 0x24, 0x08, 0x11, 0x22, 0x04, 0x09, 0x13, 0x26, 0x0c, 0x19, 0x32, 0x25, 0x0a, 0x15, 0x2a, 0x14, 0x28, 0x10, 0x20}; FILE *fic; void init_prng(int offset) { //int initial_seed = 0x5EC7F2B0; //int initial_seed = 0x30051991; My birthday! unsigned int initial_seed = 10*time(NULL) + 11*offset; srand(initial_seed); // Initialization, should only be called once. 
int r = rand(); printf("[+] PRNG initialized to 0x%08X\n", initial_seed); } void display_matrix(unsigned char state[4][4], int ver) { int i; unsigned char input[16]; if (versions[ver][0] == 64) { for (i = 0; i < 8; i++) input[i] = ((state[(2 * i) >> 2][(2 * i) & 0x3] & 0xF) << 4) | (state[(2 * i + 1) >> 2][(2 * i + 1) & 0x3] & 0xF); for (i = 0; i < 8; i++) fprintf(fic, "%02x", input[i]); } else if (versions[ver][0] == 128) { for (i = 0; i < 16; i++) input[i] = state[i >> 2][i & 0x3] & 0xFF; for (i = 0; i < 16; i++) fprintf(fic, "%02x", input[i]); } } void display_cipher_state(unsigned char state[4][4], unsigned char keyCells[3][4][4], int ver) { int k; fprintf(fic, "S = "); display_matrix(state, ver); for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++) { fprintf(fic, " - TK%i = ", k + 1); display_matrix(keyCells[k], ver); } } // Extract and apply the subtweakey to the internal state (must be the two top rows XORed together), then update the tweakey state void AddKey(unsigned char state[4][4], unsigned char keyCells[3][4][4], int ver) { int i, j, k; unsigned char pos; unsigned char keyCells_tmp[3][4][4]; // apply the subtweakey to the internal state for (i = 0; i <= 1; i++) { for (j = 0; j < 4; j++) { state[i][j] ^= keyCells[0][i][j]; if (2 * versions[ver][0] == versions[ver][1]) state[i][j] ^= keyCells[1][i][j]; else if (3 * versions[ver][0] == versions[ver][1]) state[i][j] ^= keyCells[1][i][j] ^ keyCells[2][i][j]; } } // update the subtweakey states with the permutation for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++) { for (i = 0; i < 4; i++) { for (j = 0; j < 4; j++) { //application of the TWEAKEY permutation pos = TWEAKEY_P[j + 4 * i]; keyCells_tmp[k][i][j] = keyCells[k][pos >> 2][pos & 0x3]; } } } // update the subtweakey states with the LFSRs for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++) { for (i = 0; i <= 1; i++) { for (j = 0; j < 4; j++) { //application of LFSRs for TK updates if (k == 1) { if (versions[ver][0] == 64) keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] << 1) & 0xE) ^ ((keyCells_tmp[k][i][j] >> 3) & 0x1) ^ ((keyCells_tmp[k][i][j] >> 2) & 0x1); else keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] << 1) & 0xFE) ^ ((keyCells_tmp[k][i][j] >> 7) & 0x01) ^ ((keyCells_tmp[k][i][j] >> 5) & 0x01); } else if (k == 2) { if (versions[ver][0] == 64) keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] >> 1) & 0x7) ^ ((keyCells_tmp[k][i][j]) & 0x8) ^ ((keyCells_tmp[k][i][j] << 3) & 0x8); else keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] >> 1) & 0x7F) ^ ((keyCells_tmp[k][i][j] << 7) & 0x80) ^ ((keyCells_tmp[k][i][j] << 1) & 0x80); } } } } for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++) { for (i = 0; i < 4; i++) { for (j = 0; j < 4; j++) { keyCells[k][i][j] = keyCells_tmp[k][i][j]; } } } } // Extract and apply the subtweakey to the internal state (must be the two top rows XORed together), then update the tweakey state (inverse function} void AddKey_inv(unsigned char state[4][4], unsigned char keyCells[3][4][4], int ver) { int i, j, k; unsigned char pos; unsigned char keyCells_tmp[3][4][4]; // update the subtweakey states with the permutation for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++) { for (i = 0; i < 4; i++) { for (j = 0; j < 4; j++) { //application of the inverse TWEAKEY permutation pos = TWEAKEY_P_inv[j + 4 * i]; keyCells_tmp[k][i][j] = keyCells[k][pos >> 2][pos & 0x3]; } } } // update the subtweakey states with the LFSRs for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++) { for (i = 2; 
i <= 3; i++) { for (j = 0; j < 4; j++) { //application of inverse LFSRs for TK updates if (k == 1) { if (versions[ver][0] == 64) keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] >> 1) & 0x7) ^ ((keyCells_tmp[k][i][j] << 3) & 0x8) ^ ((keyCells_tmp[k][i][j]) & 0x8); else keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] >> 1) & 0x7F) ^ ((keyCells_tmp[k][i][j] << 7) & 0x80) ^ ((keyCells_tmp[k][i][j] << 1) & 0x80); } else if (k == 2) { if (versions[ver][0] == 64) keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] << 1) & 0xE) ^ ((keyCells_tmp[k][i][j] >> 3) & 0x1) ^ ((keyCells_tmp[k][i][j] >> 2) & 0x1); else keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] << 1) & 0xFE) ^ ((keyCells_tmp[k][i][j] >> 7) & 0x01) ^ ((keyCells_tmp[k][i][j] >> 5) & 0x01); } } } } for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++) { for (i = 0; i < 4; i++) { for (j = 0; j < 4; j++) { keyCells[k][i][j] = keyCells_tmp[k][i][j]; } } } // apply the subtweakey to the internal state for (i = 0; i <= 1; i++) { for (j = 0; j < 4; j++) { state[i][j] ^= keyCells[0][i][j]; if (2 * versions[ver][0] == versions[ver][1]) state[i][j] ^= keyCells[1][i][j]; else if (3 * versions[ver][0] == versions[ver][1]) state[i][j] ^= keyCells[1][i][j] ^ keyCells[2][i][j]; } } } // Apply the constants: using a LFSR counter on 6 bits, we XOR the 6 bits to the first 6 bits of the internal state void AddConstants(unsigned char state[4][4], int r) { state[0][0] ^= (RC[r] & 0xf); state[1][0] ^= ((RC[r] >> 4) & 0x3); state[2][0] ^= 0x2; } // apply the 4-bit Sbox void SubCell4(unsigned char state[4][4]) { int i, j; for (i = 0; i < 4; i++) for (j = 0; j < 4; j++) state[i][j] = sbox_4[state[i][j]]; } // apply the 4-bit inverse Sbox void SubCell4_inv(unsigned char state[4][4]) { int i, j; for (i = 0; i < 4; i++) for (j = 0; j < 4; j++) state[i][j] = sbox_4_inv[state[i][j]]; } // apply the 8-bit Sbox void SubCell8(unsigned char state[4][4]) { int i, j; for (i = 0; i < 4; i++) for (j = 0; j < 4; j++) state[i][j] = sbox_8[state[i][j]]; } // apply the 8-bit inverse Sbox void SubCell8_inv(unsigned char state[4][4]) { int i, j; for (i = 0; i < 4; i++) for (j = 0; j < 4; j++) state[i][j] = sbox_8_inv[state[i][j]]; } // Apply the ShiftRows function void ShiftRows(unsigned char state[4][4]) { int i, j, pos; unsigned char state_tmp[4][4]; for (i = 0; i < 4; i++) { for (j = 0; j < 4; j++) { //application of the ShiftRows permutation pos = P[j + 4 * i]; state_tmp[i][j] = state[pos >> 2][pos & 0x3]; } } for (i = 0; i < 4; i++) { for (j = 0; j < 4; j++) { state[i][j] = state_tmp[i][j]; } } } // Apply the inverse ShiftRows function void ShiftRows_inv(unsigned char state[4][4]) { int i, j, pos; unsigned char state_tmp[4][4]; for (i = 0; i < 4; i++) { for (j = 0; j < 4; j++) { //application of the inverse ShiftRows permutation pos = P_inv[j + 4 * i]; state_tmp[i][j] = state[pos >> 2][pos & 0x3]; } } for (i = 0; i < 4; i++) { for (j = 0; j < 4; j++) { state[i][j] = state_tmp[i][j]; } } } // Apply the linear diffusion matrix //M = //1 0 1 1 //1 0 0 0 //0 1 1 0 //1 0 1 0 void MixColumn(unsigned char state[4][4]) { int j; unsigned char temp; for (j = 0; j < 4; j++) { state[1][j] ^= state[2][j]; state[2][j] ^= state[0][j]; state[3][j] ^= state[2][j]; temp = state[3][j]; state[3][j] = state[2][j]; state[2][j] = state[1][j]; state[1][j] = state[0][j]; state[0][j] = temp; } } // Apply the inverse linear diffusion matrix void MixColumn_inv(unsigned char state[4][4]) { int j; unsigned char temp; for (j = 0; j < 4; j++) { temp = state[3][j]; state[3][j] = state[0][j]; 
state[0][j] = state[1][j]; state[1][j] = state[2][j]; state[2][j] = temp; state[3][j] ^= state[2][j]; state[2][j] ^= state[0][j]; state[1][j] ^= state[2][j]; } } // decryption function of Skinny void dec(unsigned char *input, const unsigned char *userkey, int ver, int r) { unsigned char state[4][4]; unsigned char dummy[4][4] = {{0}}; unsigned char keyCells[3][4][4]; int i; memset(keyCells, 0, 48); for (i = 0; i < 16; i++) { if (versions[ver][0] == 64) { if (i & 1) { state[i >> 2][i & 0x3] = input[i >> 1] & 0xF; keyCells[0][i >> 2][i & 0x3] = userkey[i >> 1] & 0xF; if (versions[ver][1] >= 128) keyCells[1][i >> 2][i & 0x3] = userkey[(i + 16) >> 1] & 0xF; if (versions[ver][1] >= 192) keyCells[2][i >> 2][i & 0x3] = userkey[(i + 32) >> 1] & 0xF; } else { state[i >> 2][i & 0x3] = (input[i >> 1] >> 4) & 0xF; keyCells[0][i >> 2][i & 0x3] = (userkey[i >> 1] >> 4) & 0xF; if (versions[ver][1] >= 128) keyCells[1][i >> 2][i & 0x3] = (userkey[(i + 16) >> 1] >> 4) & 0xF; if (versions[ver][1] >= 192) keyCells[2][i >> 2][i & 0x3] = (userkey[(i + 32) >> 1] >> 4) & 0xF; } } else if (versions[ver][0] == 128) { state[i >> 2][i & 0x3] = input[i] & 0xFF; keyCells[0][i >> 2][i & 0x3] = userkey[i] & 0xFF; if (versions[ver][1] >= 256) keyCells[1][i >> 2][i & 0x3] = userkey[i + 16] & 0xFF; if (versions[ver][1] >= 384) keyCells[2][i >> 2][i & 0x3] = userkey[i + 32] & 0xFF; } } for (i = r - 1; i >= 0; i--) { AddKey(dummy, keyCells, ver); } #ifdef DEBUG fprintf(fic, "DEC - initial state: "); display_cipher_state(state, keyCells, ver); fprintf(fic, "\n"); #endif for (i = r - 1; i >= 0; i--) { MixColumn_inv(state); #ifdef DEBUG fprintf(fic, "DEC - round %.2i - after MixColumn_inv: ", i); display_cipher_state(state, keyCells, ver); fprintf(fic, "\n"); #endif ShiftRows_inv(state); #ifdef DEBUG fprintf(fic, "DEC - round %.2i - after ShiftRows_inv: ", i); display_cipher_state(state, keyCells, ver); fprintf(fic, "\n"); #endif AddKey_inv(state, keyCells, ver); #ifdef DEBUG fprintf(fic, "DEC - round %.2i - after AddKey_inv: ", i); display_cipher_state(state, keyCells, ver); fprintf(fic, "\n"); #endif AddConstants(state, i); #ifdef DEBUG fprintf(fic, "DEC - round %.2i - after AddConstants_inv: ", i); display_cipher_state(state, keyCells, ver); fprintf(fic, "\n"); #endif if (versions[ver][0] == 64) SubCell4_inv(state); else SubCell8_inv(state); #ifdef DEBUG fprintf(fic, "DEC - round %.2i - after SubCell_inv: ", i); display_cipher_state(state, keyCells, ver); fprintf(fic, "\n"); #endif } #ifdef DEBUG fprintf(fic, "DEC - final state: "); display_cipher_state(state, keyCells, ver); fprintf(fic, "\n"); #endif if (versions[ver][0] == 64) { for (i = 0; i < 8; i++) input[i] = ((state[(2 * i) >> 2][(2 * i) & 0x3] & 0xF) << 4) | (state[(2 * i + 1) >> 2][(2 * i + 1) & 0x3] & 0xF); } else if (versions[ver][0] == 128) { for (i = 0; i < 16; i++) input[i] = state[i >> 2][i & 0x3] & 0xFF; } } // encryption function of Skinny void enc(unsigned char *input, const unsigned char *userkey, int ver, int r) { unsigned char state[4][4]; unsigned char keyCells[3][4][4]; int i; memset(keyCells, 0, 48); for (i = 0; i < 16; i++) { if (versions[ver][0] == 64) { if (i & 1) { state[i >> 2][i & 0x3] = input[i >> 1] & 0xF; keyCells[0][i >> 2][i & 0x3] = userkey[i >> 1] & 0xF; if (versions[ver][1] >= 128) keyCells[1][i >> 2][i & 0x3] = userkey[(i + 16) >> 1] & 0xF; if (versions[ver][1] >= 192) keyCells[2][i >> 2][i & 0x3] = userkey[(i + 32) >> 1] & 0xF; } else { state[i >> 2][i & 0x3] = (input[i >> 1] >> 4) & 0xF; keyCells[0][i >> 2][i & 0x3] = (userkey[i >> 1] 
>> 4) & 0xF; if (versions[ver][1] >= 128) keyCells[1][i >> 2][i & 0x3] = (userkey[(i + 16) >> 1] >> 4) & 0xF; if (versions[ver][1] >= 192) keyCells[2][i >> 2][i & 0x3] = (userkey[(i + 32) >> 1] >> 4) & 0xF; } } else if (versions[ver][0] == 128) { state[i >> 2][i & 0x3] = input[i] & 0xFF; keyCells[0][i >> 2][i & 0x3] = userkey[i] & 0xFF; if (versions[ver][1] >= 256) keyCells[1][i >> 2][i & 0x3] = userkey[i + 16] & 0xFF; if (versions[ver][1] >= 384) keyCells[2][i >> 2][i & 0x3] = userkey[i + 32] & 0xFF; } } #ifdef DEBUG fprintf(fic, "ENC - initial state: "); display_cipher_state(state, keyCells, ver); fprintf(fic, "\n"); #endif for (i = 0; i < r; i++) { if (versions[ver][0] == 64) SubCell4(state); else SubCell8(state); #ifdef DEBUG fprintf(fic, "ENC - round %.2i - after SubCell: ", i); display_cipher_state(state, keyCells, ver); fprintf(fic, "\n"); #endif AddConstants(state, i); #ifdef DEBUG fprintf(fic, "ENC - round %.2i - after AddConstants: ", i); display_cipher_state(state, keyCells, ver); fprintf(fic, "\n"); #endif AddKey(state, keyCells, ver); #ifdef DEBUG fprintf(fic, "ENC - round %.2i - after AddKey: ", i); display_cipher_state(state, keyCells, ver); fprintf(fic, "\n"); #endif ShiftRows(state); #ifdef DEBUG fprintf(fic, "ENC - round %.2i - after ShiftRows: ", i); display_cipher_state(state, keyCells, ver); fprintf(fic, "\n"); #endif MixColumn(state); #ifdef DEBUG fprintf(fic, "ENC - round %.2i - after MixColumn: ", i); display_cipher_state(state, keyCells, ver); fprintf(fic, "\n"); #endif } //The last subtweakey should not be added #ifdef DEBUG fprintf(fic, "ENC - final state: "); display_cipher_state(state, keyCells, ver); fprintf(fic, "\n"); #endif if (versions[ver][0] == 64) { for (i = 0; i < 8; i++) input[i] = ((state[(2 * i) >> 2][(2 * i) & 0x3] & 0xF) << 4) | (state[(2 * i + 1) >> 2][(2 * i + 1) & 0x3] & 0xF); } else if (versions[ver][0] == 128) { for (i = 0; i < 16; i++) input[i] = state[i >> 2][i & 0x3] & 0xFF; } } // generate test vectors for all the versions of Skinny void TestVectors(int ver) { unsigned char p[16]; unsigned char c[16]; unsigned char k[48]; int n; for (n = 1; n < 10; n++) { int i; for (i = 0; i < (versions[ver][0] >> 3); i++) c[i] = p[i] = rand() & 0xff; for (i = 0; i < (versions[ver][0] >> 3); i++) printf("%02x", p[i]); printf("\n"); for (i = 0; i < (versions[ver][1] >> 3); i++) k[i] = rand() & 0xff; fprintf(fic, "TK = "); for (i = 0; i < (versions[ver][1] >> 3); i++) fprintf(fic, "%02x", k[i]); fprintf(fic, "\n"); fprintf(fic, "P = "); for (i = 0; i < (versions[ver][0] >> 3); i++) fprintf(fic, "%02x", p[i]); fprintf(fic, "\n"); enc(c, k, ver, 10); fprintf(fic, "C = "); for (i = 0; i < (versions[ver][0] >> 3); i++) fprintf(fic, "%02x", c[i]); fprintf(fic, "\n"); dec(c, k, ver, 10); fprintf(fic, "P' = "); for (i = 0; i < (versions[ver][0] >> 3); i++) fprintf(fic, "%02x", c[i]); fprintf(fic, "\n\n"); } } int boomerang(int r, int ver, int N3, unsigned char *dp, unsigned char *dc, unsigned char *dk1, unsigned char *dk2) { int i; unsigned char p1[16], p2[16]; unsigned char c3[16], c4[16]; unsigned char k1[48], k2[48], k3[48], k4[48]; // randomly choose k1 for (i = 0; i < (versions[ver][1] >> 3); i++) k1[i] = rand() & 0xff; // derive k2 for (i = 0; i < (versions[ver][1] >> 3); i++) k2[i] = k1[i] ^ dk1[i]; // derive k3 for (i = 0; i < (versions[ver][1] >> 3); i++) k3[i] = k1[i] ^ dk2[i]; // derive k4 for (i = 0; i < (versions[ver][1] >> 3); i++) k4[i] = k2[i] ^ dk2[i]; int num = 0; for (int t = 0; t < N3; t++) { // randomly choose p1 for (i = 0; i < 
(versions[ver][0] >> 3); i++)
            /* c3 holds the previous iteration's decrypted output; it is only
               valid after the first iteration, so mix it in only when t > 0 */
            p1[i] = (rand() ^ ((t > 0) ? c3[i] : 0)) & 0xff;

        // derive p2
        for (i = 0; i < (versions[ver][0] >> 3); i++)
            p2[i] = p1[i] ^ dp[i];

        enc(p1, k1, ver, r);
        enc(p2, k2, ver, r);

        // derive c3
        for (i = 0; i < (versions[ver][0] >> 3); i++)
            c3[i] = p1[i] ^ dc[i];

        // derive c4
        for (i = 0; i < (versions[ver][0] >> 3); i++)
            c4[i] = p2[i] ^ dc[i];

        dec(c3, k3, ver, r);
        dec(c4, k4, ver, r);

        bool flag = 1;
        for (i = 0; i < (versions[ver][0] >> 3); i++)
            if ((c3[i] ^ c4[i]) != dp[i])
                flag = 0;
        if (flag)
        {
            num++;
        }
    }
    return num;
}

double send_boomerangs(int R, int ver, int N1, int N2, int N3, unsigned char *dp, unsigned char *dc, unsigned char *dk1, unsigned char *dk2)
{
    // Parallel execution
    int NUM[N1];
    int counter;
    printf("#Rounds: %d rounds\n", R);
    printf("#Total Queries = (#Parallel threads) * (#Bunches per thread) * (#Queries per bunch) = %d * %d * %d = 2^(%f)\n", N1, N2, N3, log(N1 * N2 * N3) / log(2));
    clock_t clock_timer;
    double wall_timer;
    clock_timer = clock();
    wall_timer = omp_get_wtime();
    omp_set_num_threads(N1);
#pragma omp parallel for
    for (counter = 0; counter < N1; counter++)
    {
        int num = 0;
        int ID = omp_get_thread_num();
        init_prng(ID);
        for (int j = 0; j < N2; j++)
        {
            num += boomerang(R, ver, N3, dp, dc, dk1, dk2);
        }
        NUM[ID] = num;
    }
    printf("%s: %0.4f\n", "time on clock", (double)(clock() - clock_timer) / CLOCKS_PER_SEC);
    printf("%s: %0.4f\n", "time on wall", omp_get_wtime() - wall_timer);
    double sum = 0;
    double sum_temp = 1;
    for (int i = 0; i < N1; i++)
        sum += NUM[i];
    printf("sum = %f\n", sum);
    sum_temp = (double)(N1 * N2 * N3) / sum;
    printf("2^(-%f)\n\n", log(sum_temp) / log(2));
    printf("##########################\n");
    return sum;
}

void convert_hexstr_to_statearray(int ver, char hex_str[], unsigned char dx[16])
{
    for (int i = 0; i < (versions[ver][0] >> 3); i++)
    {
        /* NUL-terminate so strtol() stops after the two hex digits */
        char hex[3];
        hex[0] = hex_str[2 * i];
        hex[1] = hex_str[2 * i + 1];
        hex[2] = '\0';
        dx[i] = (unsigned char)(strtol(hex, NULL, 16) & 0xff);
    }
}

void convert_hexstr_to_tweakarray(int ver, char hex_str[], unsigned char dt[48])
{
    for (int i = 0; i < (versions[ver][1] >> 3); i++)
    {
        /* NUL-terminate so strtol() stops after the two hex digits */
        char hex[3];
        hex[0] = hex_str[2 * i];
        hex[1] = hex_str[2 * i + 1];
        hex[2] = '\0';
        dt[i] = (unsigned char)(strtol(hex, NULL, 16) & 0xff);
    }
}

int main()
{
    // srand((unsigned)time(NULL)); // Initialization, should only be called once.
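    /* No seeding is required here: each worker thread seeds its own PRNG via
       init_prng(ID) inside send_boomerangs(). */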
    int r = rand();
    // init_prng(1);

    // //test all versions of Skinny
    // for (i = 0; i < (sizeof(versions) / sizeof(*versions)); i++)
    // {
    //     sprintf(name, "test_vectors_%i_%i.txt", versions[i][0], versions[i][1]);
    //     fic = fopen(name, "w");
    //     fprintf(fic, "\n\nSkinny-%i/%i: \n", versions[i][0], versions[i][1]);
    //     TestVectors(i);
    //     fclose(fic);
    //     printf("Generating test vectors for Skinny-%i/%i - saved in file test_vectors_%i_%i.txt \n", versions[i][0], versions[i][1], versions[i][0], versions[i][1]);
    // }

    unsigned char dp[16];
    unsigned char dc[16];
    unsigned char dk1[48];
    unsigned char dk2[48];

    // #######################################################################################################
    // #######################################################################################################
    // ############################## User must change only the following lines ##############################

    int n = 1000; // Number of independent experiments
    int R = 6;    // Number of rounds
    int ver = 4;  // Determine the version:
    // [0 = Skinny-64-64]
    // [1 = Skinny-64-128]
    // [2 = Skinny-64-192]
    // [3 = Skinny-128-128]
    // [4 = Skinny-128-256]
    // [5 = Skinny-128-384]
    char dp_str[] = "00000000000000000006000000000000";
    char dc_str[] = "00000000000000000000000000000000";
    char dk1_str[] = "0000000000000000000000000200000000000000000000000000000004000000";
    char dk2_str[] = "000000000000000000000000000000f80000000000000000000000000000007f";

    // #######################################################################################################
    // #######################################################################################################

    convert_hexstr_to_statearray(ver, dp_str, dp);
    convert_hexstr_to_statearray(ver, dc_str, dc);
    convert_hexstr_to_tweakarray(ver, dk1_str, dk1);
    convert_hexstr_to_tweakarray(ver, dk2_str, dk2);

    //########################## Number of queries #########################
    int N1 = Nthreads; // Number of parallel threads : N1
    int deg1 = 12;
    int deg2 = 12;
    int N2 = 1 << deg1; // Number of bunches per thread: N2 = 2^(deg1)
    int N3 = 1 << deg2; // Number of queries per bunch: N3 = 2^(deg2)
    //################### Number of total queries : N1*N2*N3 ###############

    double sum = 0;
    for (int i = 0; i < n; i++)
    {
        sum += send_boomerangs(R, ver, N1, N2, N3, dp, dc, dk1, dk2);
    }
    printf("\nAverage = 2^(-%0.4f)\n", (log(n) + log(N1) + log(N2) + log(N3) - log(sum)) / log(2));
    // sum = (double)(n * N1 * N2 * N3) / sum;
    // printf("\nAverage = 2^(-%0.2f)\n", log(sum) / log(2));
    return 0;
}
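/* A minimal usage sketch for this experiment. The build line below is an
   assumption (the Nthreads macro consumed by main() must be supplied at
   compile time, e.g. via -DNthreads), and the file name is illustrative:

       gcc -O3 -fopenmp -DNthreads=8 -o boomerang skinny_boomerang.c -lm

   The helper below is illustrative only and is not called by the code above;
   it reproduces the arithmetic behind the final "Average = 2^(-x)" line,
   turning a hit count into the exponent x. */
#ifdef BOOMERANG_EXAMPLE
static double hits_to_neg_log2_prob(double total_queries, double hits)
{
    /* p = hits / total_queries, reported as 2^(-x) with x = -log2(p) */
    return log(total_queries / hits) / log(2);
}
#endif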
mpi_io.c
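/* mpi_io.c: MPI tensor I/O for SPLATT. Nonzeros are first spread naively
 * across ranks by mpi_simple_distribute(), then rearranged according to one
 * of three decompositions (see mpi_tt_read()): coarse-grained 1D slice
 * partitions, a medium-grained 3D grid of layers, or a fine-grained
 * partition read from a file. */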
/****************************************************************************** * INCLUDES *****************************************************************************/ #include "../splatt_mpi.h" #include "../io.h" #include "../timer.h" #include "../util.h" /****************************************************************************** * API FUNCTONS *****************************************************************************/ int splatt_mpi_csf_load( char const * const fname, splatt_idx_t * nmodes, splatt_csf ** tensors, double const * const options, MPI_Comm comm) { sptensor_t * tt = NULL; int rank, npes; MPI_Comm_rank(comm, &rank); MPI_Comm_size(comm, &npes); return SPLATT_SUCCESS; } int splatt_mpi_coord_load( char const * const fname, splatt_idx_t * nmodes, splatt_idx_t * nnz, splatt_idx_t *** inds, splatt_val_t ** vals, double const * const options, MPI_Comm comm) { sptensor_t * tt = mpi_simple_distribute(fname, comm); if(tt == NULL) { *nmodes = 0; *nnz = 0; *vals = NULL; *inds = NULL; return SPLATT_ERROR_BADINPUT; } *nmodes = tt->nmodes; *nnz = tt->nnz; /* copy to output */ *vals = tt->vals; *inds = splatt_malloc(tt->nmodes * sizeof(**inds)); for(idx_t m=0; m < tt->nmodes; ++m) { (*inds)[m] = tt->ind[m]; } free(tt); return SPLATT_SUCCESS; } /****************************************************************************** * PRIVATE FUNCTONS *****************************************************************************/ /** * @brief Fill buf with the next 'nnz_to_read' tensor values. * * @param fin The file to read from. * @param buf The sptensor buffer to fill. * @param nnz_to_read The number of nonzeros to read. */ static void p_fill_tt_nnz( FILE * fin, sptensor_t * const buf, idx_t const * const offset, idx_t const nnz_to_read) { idx_t const nmodes = buf->nmodes; char * ptr = NULL; char * line = NULL; ssize_t read; size_t len = 0; idx_t nnzread = 0; while(nnzread < nnz_to_read && (read = getline(&line, &len, fin)) != -1) { /* skip empty and commented lines */ if(read > 1 && line[0] != '#') { ptr = line; for(idx_t m=0; m < nmodes; ++m) { idx_t ind = strtoull(ptr, &ptr, 10); buf->ind[m][nnzread] = ind - offset[m]; } val_t const v = strtod(ptr, &ptr); buf->vals[nnzread++] = v; } } } static int * p_distribute_parts( sptensor_t * const ttbuf, char const * const pfname, rank_info * const rinfo) { /* root may have more than target_nnz */ idx_t const target_nnz = rinfo->global_nnz / rinfo->npes; int * parts = (int *) splatt_malloc(SS_MAX(ttbuf->nnz, target_nnz) * sizeof(int)); if(rinfo->rank == 0) { int ret; FILE * fin = open_f(pfname, "r"); /* send to all other ranks */ for(int p=1; p < rinfo->npes; ++p) { /* read into buffer */ for(idx_t n=0; n < target_nnz; ++n) { if((ret = fscanf(fin, "%d", &(parts[n]))) == 0) { fprintf(stderr, "SPLATT ERROR: not enough elements in '%s'\n", pfname); exit(1); } } MPI_Send(parts, target_nnz, MPI_INT, p, 0, rinfo->comm_3d); } /* now read my own part info */ for(idx_t n=0; n < ttbuf->nnz; ++n) { if((ret = fscanf(fin, "%d", &(parts[n]))) == 0) { fprintf(stderr, "SPLATT ERROR: not enough elements in '%s'\n", pfname); exit(1); } } fclose(fin); } else { /* receive part info */ MPI_Recv(parts, ttbuf->nnz, MPI_INT, 0, 0, rinfo->comm_3d, &(rinfo->status)); } return parts; } static void p_find_my_slices_1d( idx_t ** const ssizes, idx_t const nmodes, idx_t const nnz, rank_info * const rinfo) { idx_t const * const dims = rinfo->global_dims; /* find start/end slices for my partition */ for(idx_t m=0; m < nmodes; ++m) { /* current processor */ int currp = 0; idx_t lastn 
= 0; idx_t nnzcnt = 0; idx_t pnnz = nnz / rinfo->npes; rinfo->layer_starts[m] = 0; rinfo->layer_ends[m] = dims[m]; rinfo->mat_start[m] = 0; rinfo->mat_end[m] = dims[m]; for(idx_t s=0; s < dims[m]; ++s) { if(nnzcnt >= lastn + pnnz) { /* choose this slice or the previous, whichever is closer */ if(s > 0) { idx_t const thisdist = nnzcnt - (lastn + pnnz); idx_t const prevdist = (lastn + pnnz) - (nnzcnt - ssizes[m][s-1]); if(prevdist < thisdist) { lastn = nnzcnt - ssizes[m][s-1]; } else { lastn = nnzcnt; } } else { lastn = nnzcnt; } ++currp; /* adjust target nnz based on what is left */ pnnz = (nnz - lastn) / SS_MAX(1, rinfo->npes - currp); if(currp == rinfo->rank) { rinfo->mat_start[m] = s; } else if(currp == rinfo->rank+1 && currp != rinfo->npes) { /* only set mat_end if we aren't at the end of the tensor */ rinfo->mat_end[m] = s; break; } } nnzcnt += ssizes[m][s]; if(rinfo->rank == rinfo->npes-1) { assert(rinfo->mat_end[m] == rinfo->global_dims[m]); } } /* it is possible to have a very small dimension and too many ranks */ if(rinfo->npes > 1 && rinfo->mat_start[m] == 0 && rinfo->mat_end[m] == dims[m]) { fprintf(stderr, "SPLATT: rank: %d too many MPI ranks for mode %"\ SPLATT_PF_IDX".\n", rinfo->rank, m+1); rinfo->mat_start[m] = dims[m]; rinfo->mat_end[m] = dims[m]; } } } /** * @brief Count the nonzero values in a partition of X. * * @param fname The name of the file containing X. * @param nmodes The number of modes of X. * * @return The number of nonzeros in the intersection of all sstarts and sends. */ static idx_t p_count_my_nnz_1d( char const * const fname, idx_t const nmodes, idx_t const * const sstarts, idx_t const * const sends) { FILE * fin = open_f(fname, "r"); char * ptr = NULL; char * line = NULL; ssize_t read; size_t len = 0; /* count nnz in my partition */ idx_t mynnz = 0; while((read = getline(&line, &len, fin)) != -1) { /* skip empty and commented lines */ if(read > 1 && line[0] != '#') { int mine = 0; ptr = line; for(idx_t m=0; m < nmodes; ++m) { idx_t ind = strtoull(ptr, &ptr, 10) - 1; /* I own the nnz if it falls in any of my slices */ if(ind >= sstarts[m] && ind < sends[m]) { mine = 1; break; } } if(mine) { ++mynnz; } /* skip over tensor val */ strtod(ptr, &ptr); } } fclose(fin); free(line); return mynnz; } /** * @brief Read a partition of X into tt. * * @param fname The file containing X. * @param tt The tensor structure (must be pre-allocated). * @param sstarts Array of starting slices, inclusive (one for each mode). * @param sends Array of ending slices, exclusive (one for each mode). */ static void p_read_tt_part_1d( char const * const fname, sptensor_t * const tt, idx_t const * const sstarts, idx_t const * const sends) { idx_t const nnz = tt->nnz; idx_t const nmodes = tt->nmodes; char * ptr = NULL; char * line = NULL; ssize_t read; size_t len = 0; FILE * fin = open_f(fname, "r"); idx_t nnzread = 0; while(nnzread < nnz && (read = getline(&line, &len, fin)) != -1) { /* skip empty and commented lines */ if(read > 1 && line[0] != '#') { int mine = 0; ptr = line; for(idx_t m=0; m < nmodes; ++m) { idx_t ind = strtoull(ptr, &ptr, 10) - 1; tt->ind[m][nnzread] = ind; if(ind >= sstarts[m] && ind < sends[m]) { mine = 1; } } tt->vals[nnzread] = strtod(ptr, &ptr); if(mine) { ++nnzread; } } } fclose(fin); free(line); } /** * @brief Read my portion of X from a file. * * @param fname The file containing X. * @param ssizes The nonzero counts in each slice. * @param nmodes The number of modes in X. * @param rinfo MPI information (nnz, 3D comm, etc.). 
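 * @note The input file is scanned twice on each rank: once by
 *       p_count_my_nnz_1d() to size the allocation and once by
 *       p_read_tt_part_1d() to fill it.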
* * @return My portion of the sparse tensor read from fname. */ static sptensor_t * p_read_tt_1d( char const * const fname, idx_t ** const ssizes, idx_t const nmodes, rank_info * const rinfo) { int const rank = rinfo->rank; idx_t const nnz = rinfo->global_nnz; idx_t const * const dims = rinfo->global_dims; /* find start/end slices for my partition */ p_find_my_slices_1d(ssizes, nmodes, nnz, rinfo); /* count nnz in my partition and allocate */ idx_t const mynnz = p_count_my_nnz_1d(fname, nmodes, rinfo->mat_start, rinfo->mat_end); sptensor_t * tt = tt_alloc(mynnz, nmodes); /* now actually load values */ p_read_tt_part_1d(fname, tt, rinfo->mat_start, rinfo->mat_end); return tt; } /** * @brief Find the boundaries for a process layer. * * @param ssizes The number of nonzeros found in each index (of each mode). * ssizes[1][5] is the number of nonzeros in X(:,5,:). * @param mode Which mode to work on. * @param rinfo MPI rank information. */ static void p_find_layer_boundaries( idx_t ** const ssizes, idx_t const mode, rank_info * const rinfo) { idx_t const * const dims = rinfo->global_dims; idx_t const nnz = rinfo->global_nnz; idx_t const m = mode; /* find start/end slices for my partition */ int const layer_dim = rinfo->dims_3d[m]; idx_t pnnz = nnz / layer_dim; /* nnz in a layer */ /* current processor */ int currp = 0; idx_t lastn = 0; idx_t nnzcnt = ssizes[m][0]; /* initialize layer_ptrs */ rinfo->layer_ptrs[m] = splatt_malloc((layer_dim+1) * sizeof(**(rinfo->layer_ptrs))); rinfo->layer_ptrs[m][currp++] = 0; rinfo->layer_ptrs[m][layer_dim] = dims[m]; if(layer_dim == 1) { goto CLEANUP; return; } /* foreach slice */ for(idx_t s=1; s < dims[m]; ++s) { /* if we have passed the next layer boundary */ if(nnzcnt >= lastn + pnnz) { /* choose this slice or the previous, whichever is closer */ idx_t const thisdist = nnzcnt - (lastn + pnnz); idx_t const prevdist = (lastn + pnnz) - (nnzcnt - ssizes[m][s-1]); if(prevdist < thisdist) { lastn = nnzcnt - ssizes[m][s-1]; /* see below comment */ //rinfo->layer_ptrs[m][currp++] = s-1; } else { lastn = nnzcnt; //rinfo->layer_ptrs[m][currp++] = s; } /* Always choosing s but marking lastn with s-1 leads to better balance * and communication volume. This is totally a heuristic. */ rinfo->layer_ptrs[m][currp++] = s; /* exit early if we placed the last rank */ if(currp == layer_dim) { break; } /* adjust target nnz based on what is left */ pnnz = (nnz - lastn) / SS_MAX(1, layer_dim - (currp-1)); } nnzcnt += ssizes[m][s]; } CLEANUP: /* store layer bounderies in layer_{starts, ends} */ rinfo->layer_starts[m] = rinfo->layer_ptrs[m][rinfo->coords_3d[m]]; rinfo->layer_ends[m] = rinfo->layer_ptrs[m][rinfo->coords_3d[m] + 1]; /* it is possible to have a very small dimension and too many ranks */ if(rinfo->dims_3d[m] > 1 && rinfo->layer_ends[m] - rinfo->layer_starts[m] == dims[m]) { fprintf(stderr, "SPLATT: rank: %d too many MPI ranks for mode %"\ SPLATT_PF_IDX".\n", rinfo->rank, m+1); rinfo->layer_starts[m] = dims[m]; rinfo->layer_ends[m] = dims[m]; } } /** * @brief Rearrange nonzeros according to a medium-grained decomposition. * * @param ttbuf The tensor to rearrange. * @param ssizes The number of nonzeros found in each index. * @param rinfo MPI rank information. * * @return My owned tensor nonzeros. 
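 * @note Layer boundaries are first computed per mode by
 *       p_find_layer_boundaries(); each nonzero is then routed to the rank
 *       returned by mpi_determine_med_owner().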
*/ static sptensor_t * p_rearrange_medium( sptensor_t * const ttbuf, idx_t * * ssizes, rank_info * const rinfo) { #pragma omp parallel for schedule(static, 1) for(idx_t m=0; m < ttbuf->nmodes; ++m) { p_find_layer_boundaries(ssizes, m, rinfo); } /* create partitioning */ int * parts = splatt_malloc(ttbuf->nnz * sizeof(*parts)); #pragma omp parallel for schedule(static) for(idx_t n=0; n < ttbuf->nnz; ++n) { parts[n] = mpi_determine_med_owner(ttbuf, n, rinfo); } sptensor_t * tt = mpi_rearrange_by_part(ttbuf, parts, rinfo->comm_3d); splatt_free(parts); return tt; } /** * @brief Rearrange nonzeros according to a medium-grained decomposition. * * @param ttbuf The tensor to rearrange. * @param pfname The filename containing the partitioning information. * @param ssizes The number of nonzeros found in each index. * @param rinfo MPI rank information. * * @return My owned tensor nonzeros. */ static sptensor_t * p_rearrange_fine( sptensor_t * const ttbuf, char const * const pfname, idx_t * * ssizes, rank_info * const rinfo) { /* first distribute partitioning information */ int * parts = p_distribute_parts(ttbuf, pfname, rinfo); sptensor_t * tt = mpi_rearrange_by_part(ttbuf, parts, rinfo->comm_3d); free(parts); return tt; } /** * @brief Count the nonzeros in each slice of X. * * @param tt My subtensor. * @param ssizes A 2D array for counting slice 'sizes'. * @param rinfo MPI information (containing global dims, nnz, etc.). */ static void p_fill_ssizes( sptensor_t const * const tt, idx_t ** const ssizes, rank_info const * const rinfo) { for(idx_t m=0; m < tt->nmodes; ++m) { idx_t const * const ind = tt->ind[m]; for(idx_t n=0; n < tt->nnz; ++n) { ssizes[m][ind[n]] += 1; } /* reduce to get total slice counts */ MPI_Allreduce(MPI_IN_PLACE, ssizes[m], (int) rinfo->global_dims[m], SPLATT_MPI_IDX, MPI_SUM, rinfo->comm_3d); } } /** * @brief Fill in the best MPI dimensions we can find. The truly optimal * solution should involve the tensor's sparsity pattern, but in general * this works as good (but usually better) than the hand-tuned dimensions * that we tried. * * @param rinfo MPI rank information. */ static void p_get_best_mpi_dim( rank_info * const rinfo) { int nprimes = 0; int * primes = get_primes(rinfo->npes, &nprimes); idx_t total_size = 0; for(idx_t m=0; m < rinfo->nmodes; ++m) { total_size += rinfo->global_dims[m]; /* reset mpi dims */ rinfo->dims_3d[m] = 1; } idx_t target = total_size / (idx_t)rinfo->npes; long diffs[MAX_NMODES]; /* start from the largest prime */ for(int p = nprimes-1; p >= 0; --p) { int furthest = 0; /* find dim furthest from target */ for(idx_t m=0; m < rinfo->nmodes; ++m) { /* distance is current - target */ idx_t const curr = rinfo->global_dims[m] / rinfo->dims_3d[m]; /* avoid underflow */ diffs[m] = (curr > target) ? (curr - target) : 0; if(diffs[m] > diffs[furthest]) { furthest = m; } } /* assign p processes to furthest mode */ rinfo->dims_3d[furthest] *= primes[p]; } free(primes); } /** * @brief Read a sparse tensor in coordinate form from a text file and * and distribute among MPI ranks. * * @param fin The file to read from. * @param comm The MPI communicator to distribute among. * * @return The sparse tensor. 
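 * @note Only the root rank touches the file: it parses chunks of
 *       global_nnz / npes nonzeros, sends one chunk to each other rank, and
 *       keeps the remainder (including any leftover nonzeros) for itself.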
*/ static sptensor_t * p_tt_mpi_read_file( FILE * fin, MPI_Comm comm) { int rank, npes; MPI_Comm_rank(comm, &rank); MPI_Comm_size(comm, &npes); idx_t dims[MAX_NMODES]; idx_t offsets[MAX_NMODES]; idx_t global_nnz; idx_t nmodes; sptensor_t * tt = NULL; if(rank == 0) { /* send dimension info */ tt_get_dims(fin, &nmodes, &global_nnz, dims, offsets); rewind(fin); MPI_Bcast(&nmodes, 1, SPLATT_MPI_IDX, 0, comm); MPI_Bcast(&global_nnz, 1, SPLATT_MPI_IDX, 0, comm); } else { MPI_Bcast(&nmodes, 1, SPLATT_MPI_IDX, 0, comm); MPI_Bcast(&global_nnz, 1, SPLATT_MPI_IDX, 0, comm); } /* compute my even chunk of nonzeros -- root rank gets the extra amount */ idx_t const target_nnz = global_nnz / npes; idx_t my_nnz = target_nnz; if(rank == 0) { my_nnz = global_nnz - ((npes-1) * my_nnz); } /* read/send all chunks */ if(rank == 0) { sptensor_t * tt_buf = tt_alloc(target_nnz, nmodes); /* now send to everyone else */ for(int p=1; p < npes; ++p) { p_fill_tt_nnz(fin, tt_buf, offsets, target_nnz); for(idx_t m=0; m < tt_buf->nmodes; ++m) { MPI_Send(tt_buf->ind[m], target_nnz, SPLATT_MPI_IDX, p, m, comm); } MPI_Send(tt_buf->vals, target_nnz, SPLATT_MPI_VAL, p, nmodes, comm); } tt_free(tt_buf); /* load my own */ tt = tt_alloc(my_nnz, nmodes); p_fill_tt_nnz(fin, tt, offsets, my_nnz); } else { MPI_Status status; /* receive my chunk */ tt = tt_alloc(my_nnz, nmodes); for(idx_t m=0; m < tt->nmodes; ++m) { MPI_Recv(tt->ind[m], my_nnz, SPLATT_MPI_IDX, 0, m, comm, &status); } MPI_Recv(tt->vals, my_nnz, SPLATT_MPI_VAL, 0, nmodes, comm, &status); } return tt; } /** * @brief Read a sparse tensor in coordinate form from a binary file and * distribute among MPI ranks. * * @param fin The file to read from. * @param comm The MPI communicator to distribute among. * * @return The sparse tensor. */ static sptensor_t * p_tt_mpi_read_binary_file( FILE * fin, MPI_Comm comm) { sptensor_t * tt = NULL; int rank, npes; MPI_Comm_rank(comm, &rank); MPI_Comm_size(comm, &npes); idx_t global_nnz; idx_t nmodes; idx_t dims[MAX_NMODES]; /* get header and tensor stats */ bin_header header; if(rank == 0) { read_binary_header(fin, &header); fill_binary_idx(&nmodes, 1, &header, fin); fill_binary_idx(dims, nmodes, &header, fin); fill_binary_idx(&global_nnz, 1, &header, fin); } /* send dimension info */ if(rank == 0) { MPI_Bcast(&nmodes, 1, SPLATT_MPI_IDX, 0, comm); MPI_Bcast(&global_nnz, 1, SPLATT_MPI_IDX, 0, comm); } else { MPI_Bcast(&nmodes, 1, SPLATT_MPI_IDX, 0, comm); MPI_Bcast(&global_nnz, 1, SPLATT_MPI_IDX, 0, comm); } /* sanity check */ if(nmodes > MAX_NMODES) { if(rank == 0) { fprintf(stderr, "SPLATT ERROR: maximum %"SPLATT_PF_IDX" modes supported. " "Found %"SPLATT_PF_IDX". 
Please recompile with " "MAX_NMODES=%"SPLATT_PF_IDX".\n", MAX_NMODES, nmodes, nmodes); } return NULL; } /* compute my even chunk of nonzeros -- root rank gets the extra amount */ idx_t const target_nnz = global_nnz / npes; idx_t my_nnz = target_nnz; if(rank == 0) { my_nnz = global_nnz - ((npes-1)* target_nnz); } tt = tt_alloc(my_nnz, nmodes); /* read/send all chunks */ if(rank == 0) { /* handle inds */ idx_t * ibuf = splatt_malloc(target_nnz * sizeof(idx_t)); for(idx_t m=0; m < nmodes; ++m) { for(int p=1; p < npes; ++p) { fill_binary_idx(ibuf, target_nnz, &header, fin); MPI_Send(ibuf, target_nnz, SPLATT_MPI_IDX, p, m, comm); } /* load my own */ fill_binary_idx(tt->ind[m], my_nnz, &header, fin); } splatt_free(ibuf); /* now vals */ val_t * vbuf = splatt_malloc(target_nnz * sizeof(val_t)); for(int p=1; p < npes; ++p) { fill_binary_val(vbuf, target_nnz, &header, fin); MPI_Send(vbuf, target_nnz, SPLATT_MPI_VAL, p, nmodes, comm); } splatt_free(vbuf); /* finally, load my own vals */ fill_binary_val(tt->vals, my_nnz, &header, fin); } else { /* non-root ranks just recv */ MPI_Status status; /* receive my chunk */ for(idx_t m=0; m < tt->nmodes; ++m) { MPI_Recv(tt->ind[m], my_nnz, SPLATT_MPI_IDX, 0, m, comm, &status); } MPI_Recv(tt->vals, my_nnz, SPLATT_MPI_VAL, 0, nmodes, comm, &status); } return tt; } /****************************************************************************** * PUBLIC FUNCTONS *****************************************************************************/ sptensor_t * mpi_tt_read( char const * const ifname, char const * const pfname, rank_info * const rinfo) { timer_start(&timers[TIMER_IO]); /* first just make sure it exists */ FILE * fin; if((fin = fopen(ifname, "r")) == NULL) { if(rinfo->rank == 0) { fprintf(stderr, "SPLATT ERROR: failed to open '%s'\n", ifname); } return NULL; } fclose(fin); /* first naively distribute tensor nonzeros for analysis */ sptensor_t * ttbuf = mpi_simple_distribute(ifname, MPI_COMM_WORLD); rinfo->nmodes = ttbuf->nmodes; MPI_Allreduce(&(ttbuf->nnz), &(rinfo->global_nnz), 1, SPLATT_MPI_IDX, MPI_SUM, MPI_COMM_WORLD); MPI_Allreduce(ttbuf->dims, &(rinfo->global_dims), ttbuf->nmodes, SPLATT_MPI_IDX, MPI_MAX, MPI_COMM_WORLD); /* first compute MPI dimension if not specified by the user */ if(rinfo->decomp == DEFAULT_MPI_DISTRIBUTION) { rinfo->decomp = SPLATT_DECOMP_MEDIUM; p_get_best_mpi_dim(rinfo); } mpi_setup_comms(rinfo); /* count # nonzeros found in each index */ idx_t * ssizes[MAX_NMODES]; for(idx_t m=0; m < ttbuf->nmodes; ++m) { ssizes[m] = (idx_t *) calloc(rinfo->global_dims[m], sizeof(idx_t)); } p_fill_ssizes(ttbuf, ssizes, rinfo); /* actually parse tensor */ sptensor_t * tt = NULL; switch(rinfo->decomp) { case SPLATT_DECOMP_COARSE: tt = p_read_tt_1d(ifname, ssizes, ttbuf->nmodes, rinfo); /* now fix tt->dims */ for(idx_t m=0; m < tt->nmodes; ++m) { tt->dims[m] = 0; for(idx_t n=0; n < tt->nnz; ++n) { tt->dims[m] = SS_MAX(tt->dims[m], tt->ind[m][n] + 1); } } break; case SPLATT_DECOMP_MEDIUM: tt = p_rearrange_medium(ttbuf, ssizes, rinfo); /* now map tensor indices to local (layer) coordinates and fill in dims */ #pragma omp parallel for schedule(static, 1) for(idx_t m=0; m < ttbuf->nmodes; ++m) { tt->dims[m] = rinfo->layer_ends[m] - rinfo->layer_starts[m]; for(idx_t n=0; n < tt->nnz; ++n) { assert(tt->ind[m][n] >= rinfo->layer_starts[m]); assert(tt->ind[m][n] < rinfo->layer_ends[m]); tt->ind[m][n] -= rinfo->layer_starts[m]; } } break; case SPLATT_DECOMP_FINE: tt = p_rearrange_fine(ttbuf, pfname, ssizes, rinfo); /* now fix tt->dims */ for(idx_t m=0; m 
< tt->nmodes; ++m) { tt->dims[m] = rinfo->global_dims[m]; rinfo->layer_ends[m] = tt->dims[m]; } break; } for(idx_t m=0; m < ttbuf->nmodes; ++m) { free(ssizes[m]); } tt_free(ttbuf); timer_stop(&timers[TIMER_IO]); return tt; } void mpi_filter_tt_1d( idx_t const mode, sptensor_t const * const tt, sptensor_t * const ftt, idx_t start, idx_t end) { assert(ftt != NULL); for(idx_t m=0; m < ftt->nmodes; ++m) { ftt->dims[m] = tt->dims[m]; } idx_t const olds = start; idx_t const olde = end; /* Adjust start and end if tt has been compressed. */ assert(start != end); if(tt->indmap[mode] != NULL) { /* TODO: change this linear search into a binary one */ for(idx_t i=0; i < tt->dims[mode]; ++i) { if(tt->indmap[mode][i] == start) { start = i; } if(tt->indmap[mode][i]+1 == end) { end = i+1; break; } } } idx_t nnz = 0; for(idx_t n=0; n < tt->nnz; ++n) { /* Copy the nonzero if we own the slice. */ if(tt->ind[mode][n] >= start && tt->ind[mode][n] < end) { for(idx_t m=0; m < tt->nmodes; ++m) { ftt->ind[m][nnz] = tt->ind[m][n]; } ftt->vals[nnz++] = tt->vals[n]; } } /* update ftt dimensions and nnz */ ftt->nnz = nnz; ftt->dims[mode] = end - start; /* now map mode coords to [0, end-start) */ for(idx_t n=0; n < ftt->nnz; ++n) { assert(ftt->ind[mode][n] >= start); assert(ftt->ind[mode][n] < end); ftt->ind[mode][n] -= start; } /* create new indmap for mode */ for(idx_t m=0; m < tt->nmodes; ++m) { if(tt->indmap[m] == NULL) { break; } ftt->indmap[m] = (idx_t *) realloc(ftt->indmap[m], ftt->dims[m] * sizeof(idx_t)); /* mode indices are shifted. otherwise just copy */ if(m == mode) { for(idx_t i=0; i < ftt->dims[mode]; ++i) { ftt->indmap[mode][i] = tt->indmap[mode][i+start]; } } else { par_memcpy(ftt->indmap[m], tt->indmap[m], tt->dims[m] * sizeof(idx_t)); } } /* sanity check */ for(idx_t i=0; i < ftt->dims[mode]; ++i) { assert(i + start < end); } for(idx_t n=0; n < ftt->nnz; ++n) { assert(ftt->ind[mode][n] < end - start); } } void mpi_write_mats( matrix_t ** mats, permutation_t const * const perm, rank_info const * const rinfo, char const * const basename, idx_t const nmodes) { char * fname; idx_t const nfactors = mats[0]->J; MPI_Status status; idx_t maxdim = 0; idx_t maxlocaldim = 0; matrix_t * matbuf = NULL; val_t * vbuf = NULL; idx_t * loc_iperm = NULL; for(idx_t m=0; m < nmodes; ++m) { maxdim = SS_MAX(maxdim, rinfo->global_dims[m]); maxlocaldim = SS_MAX(maxlocaldim, mats[m]->I); } /* get the largest local dim */ if(rinfo->rank == 0) { MPI_Reduce(MPI_IN_PLACE, &maxlocaldim, 1, SPLATT_MPI_IDX, MPI_MAX, 0, rinfo->comm_3d); } else { MPI_Reduce(&maxlocaldim, NULL, 1, SPLATT_MPI_IDX, MPI_MAX, 0, rinfo->comm_3d); } if(rinfo->rank == 0) { matbuf = mat_alloc(maxdim, nfactors); loc_iperm = (idx_t *) splatt_malloc(maxdim * sizeof(idx_t)); vbuf = (val_t *) splatt_malloc(maxdim * nfactors * sizeof(val_t)); } for(idx_t m=0; m < nmodes; ++m) { /* root handles the writing */ if(rinfo->rank == 0) { asprintf(&fname, "%s%"SPLATT_PF_IDX".mat", basename, m+1); matbuf->I = rinfo->global_dims[m]; /* copy root's matrix to buffer */ for(idx_t i=0; i < mats[m]->I; ++i) { idx_t const gi = rinfo->layer_starts[m] + perm->iperms[m][i]; for(idx_t f=0; f < nfactors; ++f) { matbuf->vals[f + (gi*nfactors)] = mats[m]->vals[f+(i*nfactors)]; } } /* receive matrix from each rank */ for(int p=1; p < rinfo->npes; ++p) { idx_t layerstart; idx_t nrows; MPI_Recv(&layerstart, 1, SPLATT_MPI_IDX, p, 0, rinfo->comm_3d, &status); MPI_Recv(&nrows, 1, SPLATT_MPI_IDX, p, 0, rinfo->comm_3d, &status); MPI_Recv(vbuf, nrows * nfactors, SPLATT_MPI_VAL, p, 0, 
rinfo->comm_3d, &status); MPI_Recv(loc_iperm, nrows, SPLATT_MPI_IDX, p, 0, rinfo->comm_3d, &status); /* permute buffer and copy into matbuf */ for(idx_t i=0; i < nrows; ++i) { idx_t const gi = layerstart + loc_iperm[i]; for(idx_t f=0; f < nfactors; ++f) { matbuf->vals[f + (gi*nfactors)] = vbuf[f+(i*nfactors)]; } } } /* write the factor matrix to disk */ mat_write(matbuf, fname); /* clean up */ free(fname); } else { /* send matrix to root */ MPI_Send(&(rinfo->layer_starts[m]), 1, SPLATT_MPI_IDX, 0, 0, rinfo->comm_3d); MPI_Send(&(mats[m]->I), 1, SPLATT_MPI_IDX, 0, 0, rinfo->comm_3d); MPI_Send(mats[m]->vals, mats[m]->I * mats[m]->J, SPLATT_MPI_VAL, 0, 0, rinfo->comm_3d); MPI_Send(perm->iperms[m] + rinfo->mat_start[m], mats[m]->I, SPLATT_MPI_IDX, 0, 0, rinfo->comm_3d); } } /* foreach mode */ if(rinfo->rank == 0) { mat_free(matbuf); free(vbuf); free(loc_iperm); } } void mpi_write_part( sptensor_t const * const tt, permutation_t const * const perm, rank_info const * const rinfo) { /* file name is <rank>.part */ char name[256]; sprintf(name, "%d.part", rinfo->rank); FILE * fout = open_f(name, "w"); for(idx_t n=0; n < tt->nnz; ++n) { for(idx_t m=0; m < tt->nmodes; ++m) { /* map idx to original global coordinate */ idx_t idx = tt->ind[m][n]; if(tt->indmap[m] != NULL) { idx = tt->indmap[m][idx]; } if(perm->iperms[m] != NULL) { idx = perm->iperms[m][idx]; } /* write index */ fprintf(fout, "%"SPLATT_PF_IDX" ", 1+idx); } fprintf(fout, "%"SPLATT_PF_VAL"\n", tt->vals[n]); } fclose(fout); } sptensor_t * mpi_simple_distribute( char const * const ifname, MPI_Comm comm) { int rank, npes; MPI_Comm_rank(comm, &rank); MPI_Comm_size(comm, &npes); sptensor_t * tt = NULL; FILE * fin = NULL; if(rank == 0) { fin = open_f(ifname, "r"); } switch(get_file_type(ifname)) { case SPLATT_FILE_TEXT_COORD: tt = p_tt_mpi_read_file(fin, comm); break; case SPLATT_FILE_BIN_COORD: tt = p_tt_mpi_read_binary_file(fin, comm); break; } if(rank == 0) { fclose(fin); } /* set dims info */ #pragma omp parallel for schedule(static, 1) for(idx_t m=0; m < tt->nmodes; ++m) { idx_t const * const inds = tt->ind[m]; idx_t dim = 1 +inds[0]; for(idx_t n=1; n < tt->nnz; ++n) { dim = SS_MAX(dim, 1 + inds[n]); } tt->dims[m] = dim; } return tt; } matrix_t * mpi_mat_rand( idx_t const mode, idx_t const nfactors, permutation_t const * const perm, rank_info * const rinfo) { idx_t const localdim = rinfo->mat_end[mode] - rinfo->mat_start[mode]; matrix_t * mymat = mat_alloc(localdim, nfactors); MPI_Status status; /* figure out buffer sizes */ idx_t maxlocaldim = localdim; if(rinfo->rank == 0) { MPI_Reduce(MPI_IN_PLACE, &maxlocaldim, 1, SPLATT_MPI_IDX, MPI_MAX, 0, rinfo->comm_3d); } else { MPI_Reduce(&maxlocaldim, NULL, 1, SPLATT_MPI_IDX, MPI_MAX, 0, rinfo->comm_3d); } /* root rank does the heavy lifting */ if(rinfo->rank == 0) { /* allocate buffers */ idx_t * loc_perm = splatt_malloc(maxlocaldim * sizeof(*loc_perm)); val_t * vbuf = splatt_malloc(maxlocaldim * nfactors * sizeof(*vbuf)); /* allocate initial factor */ matrix_t * full_factor = mat_rand(rinfo->global_dims[mode], nfactors); /* copy root's own matrix to output */ #pragma omp parallel for schedule(static) for(idx_t i=0; i < localdim; ++i) { idx_t const gi = rinfo->mat_start[mode] + perm->iperms[mode][i]; for(idx_t f=0; f < nfactors; ++f) { mymat->vals[f + (i*nfactors)] = full_factor->vals[f+(gi*nfactors)]; } } /* communicate! 
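   * the root walks ranks 1..npes-1, gathers each rank's layer start and
   * local permutation, fills vbuf with the matching rows of full_factor,
   * and ships those rows over: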
*/
    for(int p=1; p < rinfo->npes; ++p) {
      /* first receive layer start and permutation info */
      idx_t layerstart;
      idx_t nrows;
      MPI_Recv(&layerstart, 1, SPLATT_MPI_IDX, p, 0, rinfo->comm_3d, &status);
      MPI_Recv(&nrows, 1, SPLATT_MPI_IDX, p, 1, rinfo->comm_3d, &status);
      MPI_Recv(loc_perm, nrows, SPLATT_MPI_IDX, p, 2, rinfo->comm_3d, &status);

      /* fill buffer */
      #pragma omp parallel for schedule(static)
      for(idx_t i=0; i < nrows; ++i) {
        idx_t const gi = layerstart + loc_perm[i];
        for(idx_t f=0; f < nfactors; ++f) {
          vbuf[f + (i*nfactors)] = full_factor->vals[f+(gi*nfactors)];
        }
      }

      /* send to rank p */
      MPI_Send(vbuf, nrows * nfactors, SPLATT_MPI_VAL, p, 3, rinfo->comm_3d);
    }

    mat_free(full_factor);
    splatt_free(loc_perm);
    splatt_free(vbuf);

  /* other ranks just send/recv */
  } else {
    /* send permutation info to root */
    MPI_Send(&(rinfo->layer_starts[mode]), 1, SPLATT_MPI_IDX, 0, 0, rinfo->comm_3d);
    MPI_Send(&localdim, 1, SPLATT_MPI_IDX, 0, 1, rinfo->comm_3d);
    MPI_Send(perm->iperms[mode] + rinfo->mat_start[mode], localdim, SPLATT_MPI_IDX, 0, 2, rinfo->comm_3d);

    /* receive factor */
    MPI_Recv(mymat->vals, mymat->I * mymat->J, SPLATT_MPI_VAL, 0, 3, rinfo->comm_3d, &status);
  }

  return mymat;
}

sptensor_t * mpi_rearrange_by_part(
    sptensor_t const * const ttbuf,
    int const * const parts,
    MPI_Comm comm)
{
  int rank, npes;
  MPI_Comm_rank(comm, &rank);
  MPI_Comm_size(comm, &npes);

  /* count how many to send to each process */
  int * nsend = calloc(npes, sizeof(*nsend));
  int * nrecv = calloc(npes, sizeof(*nrecv));
  for(idx_t n=0; n < ttbuf->nnz; ++n) {
    nsend[parts[n]] += 1;
  }
  MPI_Alltoall(nsend, 1, MPI_INT, nrecv, 1, MPI_INT, comm);

  idx_t send_total = 0;
  idx_t recv_total = 0;
  for(int p=0; p < npes; ++p) {
    send_total += nsend[p];
    recv_total += nrecv[p];
  }
  assert(send_total == ttbuf->nnz);

  /* how many nonzeros I'll own */
  idx_t const nowned = recv_total;

  int * send_disp = splatt_malloc((npes+1) * sizeof(*send_disp));
  int * recv_disp = splatt_malloc((npes+1) * sizeof(*recv_disp));

  /* recv_disp is const so we'll just fill it out once */
  recv_disp[0] = 0;
  for(int p=1; p <= npes; ++p) {
    recv_disp[p] = recv_disp[p-1] + nrecv[p-1];
  }

  /* allocate my tensor and send buffer */
  sptensor_t * tt = tt_alloc(nowned, ttbuf->nmodes);
  idx_t * isend_buf = splatt_malloc(ttbuf->nnz * sizeof(*isend_buf));

  /* rearrange into sendbuf and send one mode at a time */
  for(idx_t m=0; m < ttbuf->nmodes; ++m) {
    /* prefix sum to make disps */
    send_disp[0] = send_disp[1] = 0;
    for(int p=2; p <= npes; ++p) {
      send_disp[p] = send_disp[p-1] + nsend[p-2];
    }

    idx_t const * const ind = ttbuf->ind[m];
    for(idx_t n=0; n < ttbuf->nnz; ++n) {
      idx_t const index = send_disp[parts[n]+1]++;
      isend_buf[index] = ind[n];
    }

    /* exchange indices */
    MPI_Alltoallv(isend_buf, nsend, send_disp, SPLATT_MPI_IDX,
        tt->ind[m], nrecv, recv_disp, SPLATT_MPI_IDX, comm);
  }
  splatt_free(isend_buf);

  /* lastly, rearrange vals */
  val_t * vsend_buf = splatt_malloc(ttbuf->nnz * sizeof(*vsend_buf));
  send_disp[0] = send_disp[1] = 0;
  for(int p=2; p <= npes; ++p) {
    send_disp[p] = send_disp[p-1] + nsend[p-2];
  }
  val_t const * const vals = ttbuf->vals;
  for(idx_t n=0; n < ttbuf->nnz; ++n) {
    idx_t const index = send_disp[parts[n]+1]++;
    vsend_buf[index] = vals[n];
  }

  /* exchange vals */
  MPI_Alltoallv(vsend_buf, nsend, send_disp, SPLATT_MPI_VAL,
      tt->vals, nrecv, recv_disp, SPLATT_MPI_VAL, comm);
  splatt_free(vsend_buf);

  splatt_free(send_disp);
  splatt_free(recv_disp);

  /* allocated with calloc */
  free(nsend);
  free(nrecv);

  return tt;
}

int mpi_determine_med_owner(
  sptensor_t * const ttbuf,
  idx_t const n,
  rank_info *
const rinfo) { int coords[MAX_NMODES]; assert(rinfo->decomp == SPLATT_DECOMP_MEDIUM); /* determine the coordinates of the owner rank */ for(idx_t m=0; m < ttbuf->nmodes; ++m) { idx_t const id = ttbuf->ind[m][n]; /* silly linear scan over each layer. * TODO: do a binary search */ for(int l=0; l <= rinfo->dims_3d[m]; ++l) { if(id < rinfo->layer_ptrs[m][l]) { coords[m] = l-1; break; } } } /* translate that to an MPI rank */ int owner; MPI_Cart_rank(rinfo->comm_3d, coords, &owner); return owner; }
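/* The two "TODO: binary search" notes above (in mpi_filter_tt_1d() and
 * mpi_determine_med_owner()) want the same primitive. A minimal sketch under
 * the assumption that ptrs[] is a sorted boundary array such as
 * layer_ptrs[m][0..nlayers] with ptrs[0] == 0 and ptrs[nlayers] == dims[m];
 * mpi_determine_med_owner() could then set
 * coords[m] = p_layer_of(rinfo->layer_ptrs[m], rinfo->dims_3d[m], id).
 * The function name is illustrative and nothing here is wired into the code
 * above. */
#ifdef SPLATT_LAYER_BSEARCH_EXAMPLE
static int p_layer_of(
  idx_t const * const ptrs,
  int const nlayers,
  idx_t const id)
{
  /* invariant: ptrs[lo] <= id; we want the largest such layer index */
  int lo = 0;
  int hi = nlayers - 1;
  while(lo < hi) {
    int const mid = lo + ((hi - lo + 1) / 2);
    if(ptrs[mid] <= id) {
      lo = mid;
    } else {
      hi = mid - 1;
    }
  }
  /* ptrs[lo] <= id < ptrs[lo+1] */
  return lo;
}
#endif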
GB_binop__bset_int8.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__bset_int8) // A.*B function (eWiseMult): GB (_AemultB_08__bset_int8) // A.*B function (eWiseMult): GB (_AemultB_02__bset_int8) // A.*B function (eWiseMult): GB (_AemultB_04__bset_int8) // A.*B function (eWiseMult): GB (_AemultB_bitmap__bset_int8) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((none)) // C+=B function (dense accum): GB (_Cdense_accumB__bset_int8) // C+=b function (dense accum): GB (_Cdense_accumb__bset_int8) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bset_int8) // C=scalar+B GB (_bind1st__bset_int8) // C=scalar+B' GB (_bind1st_tran__bset_int8) // C=A+scalar GB (_bind2nd__bset_int8) // C=A'+scalar GB (_bind2nd_tran__bset_int8) // C type: int8_t // A type: int8_t // B,b type: int8_t // BinaryOp: cij = GB_BITSET (aij, bij, int8_t, 8) #define GB_ATYPE \ int8_t #define GB_BTYPE \ int8_t #define GB_CTYPE \ int8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int8_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int8_t bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int8_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GB_BITSET (x, y, int8_t, 8) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 1 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_BSET || GxB_NO_INT8 || GxB_NO_BSET_INT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__bset_int8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__bset_int8) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__bset_int8) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int8_t int8_t bwork = (*((int8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *restrict Cx = (int8_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *restrict Cx = (int8_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__bset_int8) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; 
#endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__bset_int8) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__bset_int8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__bset_int8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__bset_int8) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__bset_int8) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *Cx = (int8_t *) Cx_output ; int8_t x = (*((int8_t *) x_input)) ; int8_t *Bx = (int8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int8_t bij = GBX (Bx, p, false) ; Cx [p] = GB_BITSET (x, bij, int8_t, 8) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__bset_int8) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int8_t *Cx = (int8_t *) Cx_output ; int8_t *Ax = (int8_t *) Ax_input ; int8_t y = (*((int8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int8_t aij = GBX (Ax, p, false) ; Cx [p] = GB_BITSET (aij, y, int8_t, 8) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_BITSET (x, aij, int8_t, 8) ; \ } GrB_Info GB (_bind1st_tran__bset_int8) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t 
*restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t x = (*((const int8_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_BITSET (aij, y, int8_t, 8) ; \ } GrB_Info GB (_bind2nd_tran__bset_int8) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t y = (*((const int8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
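//------------------------------------------------------------------------------
// illustration only: one plausible expansion of the BSET operator
//------------------------------------------------------------------------------

// This demo is NOT generated code. It assumes GB_BITSET (x, y, int8_t, 8)
// sets bit (y mod 8) of x; the authoritative definition lives in GB.h and may
// differ (for example, MATLAB-style 1-based bit positions). The helper name
// is illustrative.
#ifdef GB_BSET_INT8_EXAMPLE
static inline int8_t bset_int8_demo (int8_t x, int8_t y)
{
    // cij = aij with one bit, selected by bij, turned on
    return (int8_t) (x | (((uint8_t) 1) << (((uint8_t) y) % 8))) ;
}
#endif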
pumathreadpool.c
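/* pumathreadpool.c: a thread pool with two backends chosen at compile time.
   With NOOPENMP defined, work runs on persistent pthreads that are pinned
   with sched_setaffinity() and synchronized by a pair of barriers; otherwise
   each executeOnThreadPool() call simply opens an OpenMP parallel region.
   STATIC_THREADPOOL turns the API into a process-wide singleton
   (setupThreadPool()/getThreadPool() instead of createThreadPool()). */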
#define _GNU_SOURCE
#include <sched.h>

#include "pumathreadpool.h"
#include "internal/profiling.h"
#include "internal/pthreadbarrier.h"

#include <stdlib.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <pthread.h>
#include <unistd.h>

#ifndef NOOPENMP
#include <omp.h>
#endif

#ifdef NOOPENMP
static pthread_once_t offsetKeyOnce = PTHREAD_ONCE_INIT;
static pthread_key_t timeOffsetKey;
#endif // NOOPENMP

#ifdef STATIC_THREADPOOL
static bool _setupComplete = false;
static struct pumaThreadPool* threadPool;
#endif

struct _threadPoolWorkerInfo
{
	struct pumaThreadPool* pool;
	size_t cpu;
	size_t threadNum;
	double timeSeconds;
};

struct pumaThreadPool
{
	size_t numThreads;
	pthread_t* threads;
	struct _threadPoolWorkerInfo** threadsInfo;

	pthread_barrier_t workWaitBarrier;
	pthread_barrier_t doneBarrier;

	pthread_rwlock_t workFunctionLock;
	void (*workFunction)(void* arg);
	void* arg;

	double timeSeconds;
};

#ifdef NOOPENMP
static pthread_key_t numThreadsKey;
static pthread_key_t threadNumKey;
static pthread_key_t cpuNumKey;
static pthread_once_t key_once = PTHREAD_ONCE_INIT;
#endif // NOOPENMP

size_t pumaGetThreadNum(void)
{
#ifdef NOOPENMP
	void* ptr = pthread_getspecific(threadNumKey);
	if(ptr != NULL)
		return *((size_t*)ptr);
	else
		return 0;
#else
	return omp_get_thread_num();
#endif // NOOPENMP
}

size_t pumaGetNumThreads(struct pumaThreadPool* pool)
{
	if(pool == NULL)
	{
#ifdef NOOPENMP
		return *((size_t*)pthread_getspecific(numThreadsKey));
#else
		return omp_get_max_threads();
#endif // NOOPENMP
	}
	else
		return pool->numThreads;
}

size_t pumaGetCPUNum(void)
{
#ifdef NOOPENMP
	void* ptr = pthread_getspecific(cpuNumKey);
	if(ptr != NULL)
		return *((size_t*)ptr);
	else
		return 0;
#else
	return omp_get_thread_num();
#endif // NOOPENMP
}

#ifdef NOOPENMP
#ifdef __linux__
static void _parseAffinityStr(char* affinityStr, cpu_set_t* set)
{
	char* saveptr;
	char* str = affinityStr;
	char* token;

	CPU_ZERO(set);

	while((token = strtok_r(str, ",", &saveptr)) != NULL)
	{
		str = NULL;

		unsigned int start;
		unsigned int end;
		int matched = sscanf(token, "%u-%u", &start, &end);

		if(matched == 1)
		{
			CPU_SET(start, set);
		}
		else if(matched == 2)
		{
			for(unsigned int i = start; i <= end; ++i)
				CPU_SET(i, set);
		}
		else
		{
			fprintf(stderr, "Error parsing affinity string!\n");
			exit(-1);
		}
	}
}
#endif

static inline double _max(double a, double b)
{
	return (a > b) * a + (a <= b) * b;
}

static void _makeTimeKey(void)
{
	pthread_key_create(&timeOffsetKey, NULL);
	pthread_setspecific(timeOffsetKey, malloc(sizeof(double)));
	*(double*)pthread_getspecific(timeOffsetKey) = 0;
}
#endif // NOOPENMP

void executeOnThreadPool(struct pumaThreadPool* tp,
		void (*workFunction)(void* arg), void* arg)
{
#ifdef NOOPENMP
	pthread_rwlock_wrlock(&tp->workFunctionLock);
	tp->workFunction = workFunction;
	tp->arg = arg;
	pthread_barrier_wait(&tp->workWaitBarrier);
	pthread_rwlock_unlock(&tp->workFunctionLock);
	pthread_barrier_wait(&tp->doneBarrier);

	tp->timeSeconds = 0;
	for(size_t i = 0; i < tp->numThreads; ++i)
		tp->timeSeconds = _max(tp->threadsInfo[i]->timeSeconds, tp->timeSeconds);

	(void)pthread_once(&offsetKeyOnce, &_makeTimeKey);
	*(double*)pthread_getspecific(timeOffsetKey) += tp->timeSeconds;
#else
	#pragma omp parallel num_threads(tp->numThreads)
	{
		workFunction(arg);
	}
#endif // NOOPENMP
}

#ifdef NOOPENMP
static void* _threadPoolWorker(void* arg)
{
	struct _threadPoolWorkerInfo* info = (struct _threadPoolWorkerInfo*)arg;
	struct pumaThreadPool* pool = info->pool;

	/* Local stack copy because each pthreads thread has its own stack and we
	   don't do cross-thread stack writes, meaning that the stack's pages will
	   be numa domain local due to first-touch paging. */
	size_t numThreads = pool->numThreads;
	size_t threadNum = info->threadNum;
	size_t cpu = info->cpu;

	pthread_setspecific(numThreadsKey, &numThreads);
	pthread_setspecific(threadNumKey, &threadNum);
	pthread_setspecific(cpuNumKey, &cpu);

#ifdef __linux__
	cpu_set_t set;
	CPU_ZERO(&set);
	CPU_SET(info->cpu, &set);
	sched_setaffinity(0, sizeof(set), &set);
#endif

	PROFILING_DECLS(doWork);

	while(true)
	{
		pthread_barrier_wait(&pool->workWaitBarrier);

		pthread_rwlock_rdlock(&pool->workFunctionLock);
		void (*workFunction)(void* arg) = pool->workFunction;
		void* arg = pool->arg;
		pthread_rwlock_unlock(&pool->workFunctionLock);

		PROFILE(doWork, workFunction(arg);)
		info->timeSeconds = GET_ELAPSED_S(doWork);

		pthread_barrier_wait(&pool->doneBarrier);
	}
}

static void _make_keys(void)
{
	pthread_key_create(&numThreadsKey, NULL);
	pthread_key_create(&threadNumKey, NULL);
	pthread_key_create(&cpuNumKey, NULL);
}
#endif // NOOPENMP

#ifdef STATIC_THREADPOOL
struct pumaThreadPool* getThreadPool(void)
{
	if(!_setupComplete)
		fprintf(stderr, "Please call setupThreadPool() before getThreadPool().");
	return threadPool;
}

void setupThreadPool(size_t numThreads, char* affinityStr)
{
	(void)affinityStr;
	if(_setupComplete)
		return;
#else
struct pumaThreadPool* createThreadPool(size_t numThreads, char* affinityStr)
{
	(void)affinityStr;
	struct pumaThreadPool* threadPool;
#endif
	threadPool = (struct pumaThreadPool*)calloc(1, sizeof(struct pumaThreadPool));

	numThreads = (numThreads != 0) ? numThreads : sysconf(_SC_NPROCESSORS_ONLN);

	threadPool->numThreads = numThreads;
	threadPool->threads = (pthread_t*)calloc(numThreads, sizeof(pthread_t));

#ifdef NOOPENMP
	threadPool->threadsInfo = (struct _threadPoolWorkerInfo**)calloc(numThreads,
			sizeof(struct _threadPoolWorkerInfo*));

	pthread_barrier_init(&threadPool->workWaitBarrier, NULL, numThreads + 1);
	pthread_barrier_init(&threadPool->doneBarrier, NULL, numThreads + 1);
	pthread_rwlock_init(&threadPool->workFunctionLock, NULL);

	(void)pthread_once(&key_once, &_make_keys);

	size_t currThread = 0;

#ifdef __linux__
	if(affinityStr != NULL)
	{
		cpu_set_t cpus;
		CPU_ZERO(&cpus);
		_parseAffinityStr(affinityStr, &cpus);
		size_t nCPUs = CPU_COUNT(&cpus);

		for(size_t i = 0; i < nCPUs && currThread < numThreads; ++i)
		{
			if(CPU_ISSET(i, &cpus))
			{
				struct _threadPoolWorkerInfo* tpInfo =
						(struct _threadPoolWorkerInfo*)malloc(
								sizeof(struct _threadPoolWorkerInfo));
				tpInfo->pool = threadPool;
				tpInfo->cpu = i;
				tpInfo->threadNum = currThread;
				threadPool->threadsInfo[currThread] = tpInfo;

				pthread_create(&threadPool->threads[currThread++], NULL,
						&_threadPoolWorker, tpInfo);
			}
		}
	}
	else
#endif // __linux__
	{
		for(size_t i = 0; currThread < numThreads; ++i)
		{
			struct _threadPoolWorkerInfo* tpInfo =
					(struct _threadPoolWorkerInfo*)malloc(
							sizeof(struct _threadPoolWorkerInfo));
			tpInfo->pool = threadPool;
			tpInfo->cpu = i;
			tpInfo->threadNum = currThread;
			threadPool->threadsInfo[currThread] = tpInfo;

			pthread_create(&threadPool->threads[currThread++], NULL,
					&_threadPoolWorker, tpInfo);
		}
	}
#else
	omp_set_num_threads(numThreads);
	#pragma omp parallel
	{
#ifdef __linux__
		cpu_set_t set;
		CPU_ZERO(&set);
		CPU_SET(omp_get_thread_num(), &set);
		sched_setaffinity(0, sizeof(set), &set);
#endif
	}
#endif // NOOPENMP

#ifdef STATIC_THREADPOOL
	_setupComplete = true;
#else
	return threadPool;
#endif
}

double pumaGetTimeWaitedForPool(void)
{
#ifdef NOOPENMP
	void* ptr = pthread_getspecific(timeOffsetKey);
	if(ptr != NULL)
		return *((double*)ptr);
#endif // NOOPENMP
	return 0;
}

void freeThreadPool(struct pumaThreadPool* pool)
{
	for(size_t i = 0; i < pool->numThreads; ++i)
		(void)pthread_cancel(pool->threads[i]);

#ifdef NOOPENMP
	pthread_barrier_destroy(&pool->workWaitBarrier);
	pthread_barrier_destroy(&pool->doneBarrier);
#endif // NOOPENMP

	free(pool->threads);
	free(pool);
}
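A minimal usage sketch for the pool API above, assuming the non-STATIC_THREADPOOL build (so createThreadPool()/freeThreadPool() are the entry points); the work function and its argument struct are made up for illustration:

/* usage_sketch.c -- illustrative only, not part of the library. */
#include <stddef.h>
#include "pumathreadpool.h"

struct workArg { double* data; size_t n; };  /* hypothetical payload */

static void scaleWork(void* arg)
{
	struct workArg* w = (struct workArg*)arg;
	size_t nt = pumaGetNumThreads(NULL);
	size_t tid = pumaGetThreadNum();
	/* Every worker receives the same arg; each strides over the array
	   by the thread count so the work is disjoint. */
	for(size_t i = tid; i < w->n; i += nt)
		w->data[i] *= 2.0;
}

int main(void)
{
	double data[1024] = {0};
	struct workArg w = {data, 1024};
	/* 0 threads means one per online CPU; NULL means no affinity string. */
	struct pumaThreadPool* pool = createThreadPool(0, NULL);
	executeOnThreadPool(pool, &scaleWork, &w);
	freeThreadPool(pool);
	return 0;
}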
FFTImageFilter.h
/*
 * MIT License
 *
 * Copyright (c) 2018-2019 Benjamin Köhler
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#pragma once

#ifndef BK_FFTIMAGEFILTER_H
#define BK_FFTIMAGEFILTER_H

#include <cassert>
#include <complex>
#include <vector>

#include <bk/FFT>
#include <bkDataset/lib/bkDataset_export.h>

namespace bk
{
  class BKDATASET_EXPORT FFTImageFilter
  {
      //====================================================================================================
      //===== DEFINITIONS
      //====================================================================================================
      using self_type = FFTImageFilter;

      //====================================================================================================
      //===== MEMBERS
      //====================================================================================================
      std::vector<unsigned int> _off;
      std::vector<bool> _orig_size_uneven;
      bool _normalization;

      //====================================================================================================
      //===== CONSTRUCTORS & DESTRUCTOR
      //====================================================================================================
    public:
      /// @{ -------------------------------------------------- CTOR
      FFTImageFilter();
      FFTImageFilter(const self_type&);
      FFTImageFilter(self_type&&) noexcept;
      /// @}

      /// @{ -------------------------------------------------- DTOR
      ~FFTImageFilter();
      /// @}

      //====================================================================================================
      //===== GETTER
      //====================================================================================================
      /// @{ -------------------------------------------------- GET PADDING SIZE
      /*!
       * @brief padding per dimension
       *
       * This FFT implementation requires that the size of each dimension is a power of 2.
       * The original image will be copied into the middle of a new zero-padded image.
       * This function provides the size of the boundary (padding) per dimension.
       */
      [[nodiscard]] const std::vector<unsigned int>& padding_size() const;
      [[nodiscard]] const std::vector<bool>& size_uneven() const;
      /// @}

      /// @{ -------------------------------------------------- GET NORMALIZATION
      [[nodiscard]] bool normalization_is_enabled() const;
      /// @}

      //====================================================================================================
      //===== SETTER
      //====================================================================================================
      /// @{ -------------------------------------------------- OPERATOR =
      [[maybe_unused]] auto operator=(const self_type& other) -> self_type&;
      [[maybe_unused]] auto operator=(self_type&& other) noexcept -> self_type&;
      /// @}

      /// @{ -------------------------------------------------- SET NORMALIZATION
      void set_normalization_enabled(bool b);
      /// @}

      //====================================================================================================
      //===== FUNCTIONS
      //====================================================================================================
      /// @{ -------------------------------------------------- APPLY
      template<typename TImage>
      [[nodiscard]] typename TImage::template self_template_type<std::complex<double>> apply(const TImage& img)
      {
          const unsigned int nDims = img.num_dimensions();
          assert(nDims >= 1 && nDims <= 4 && "fft is only implemented for 1/2/3/4D images");

          const auto size = img.size();

          /*
           * make each image dimension size a power of 2
           */
          std::vector<unsigned int> sizePow2(nDims, 1);
          unsigned int newNumValues = 1;

          _off.resize(nDims);
          _orig_size_uneven.resize(nDims);

          for (unsigned int dimId = 0; dimId < nDims; ++dimId)
          {
              unsigned int& p = sizePow2[dimId];

              while (p < size[dimId])
              { p <<= 1; }

              newNumValues *= p;

              _off[dimId] = (sizePow2[dimId] - size[dimId]) / 2;
              _orig_size_uneven[dimId] = (sizePow2[dimId] - size[dimId]) % 2;
          } // for dimId

          /*
           * create result image
           */
          typename TImage::template self_template_type<std::complex<double>> fftimg;
          fftimg.set_size(sizePow2);

          #pragma omp parallel for
          for (unsigned int i = 0; i < newNumValues; ++i)
          {
              auto gid = bk::list_to_grid_id(sizePow2, i);

              bool insideOriginalImage = true;

              for (unsigned int dimId = 0; dimId < nDims; ++dimId)
              {
                  if (gid[dimId] < _off[dimId] || gid[dimId] >= size[dimId] + _off[dimId])
                  {
                      insideOriginalImage = false;
                      break;
                  }
              }

              if (!insideOriginalImage)
              {
                  fftimg[i].real(0);
                  continue;
              }

              auto gidoff = gid;
              for (unsigned int dimId = 0; dimId < nDims; ++dimId)
              { gidoff[dimId] -= _off[dimId]; }

              fftimg[i].real(img(gidoff));
          } // for i

          std::complex<double>* rawDataPtr = fftimg.data().data().data();

          switch (nDims)
          {
              case 1:
              {
                  FFT1D(rawDataPtr, sizePow2[0], _normalization);
                  break;
              }
              case 2:
              {
                  FFT2D(rawDataPtr, sizePow2[0], sizePow2[1], _normalization);
                  break;
              }
              case 3:
              {
                  FFT3D(rawDataPtr, sizePow2[0], sizePow2[1], sizePow2[2], _normalization);
                  break;
              }
              case 4:
              {
                  FFT4D(rawDataPtr, sizePow2[0], sizePow2[1], sizePow2[2], sizePow2[3], _normalization);
                  break;
              }
          }

          return fftimg;
      }
      /// @}
  }; // class FFTImageFilter
} // namespace bk

#endif //BK_FFTIMAGEFILTER_H
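The size bookkeeping in apply() can be checked in isolation: each dimension is rounded up to the next power of 2, the original image is centered, and the left offset plus an "uneven" flag record where it landed. A minimal standalone sketch of the same computation (the image sizes are made up):

#include <iostream>
#include <vector>

int main()
{
    const std::vector<unsigned int> size{60, 100, 7}; // hypothetical image extents
    for (unsigned int s : size)
    {
        unsigned int p = 1;
        while (p < s) { p <<= 1; }            // next power of 2 >= s
        const unsigned int off = (p - s) / 2; // left padding (the _off entry)
        const bool uneven = (p - s) % 2;      // one extra padded sample on the right
        std::cout << s << " -> " << p << " (offset " << off
                  << ", uneven " << uneven << ")\n";
    }
    // prints: 60 -> 64 (offset 2, uneven 0), 100 -> 128 (offset 14, uneven 0),
    //         7 -> 8 (offset 0, uneven 1)
    return 0;
}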
gbdt.h
/*!
 * Copyright (c) 2016 Microsoft Corporation. All rights reserved.
 * Licensed under the MIT License. See LICENSE file in the project root for license information.
 */
#ifndef LIGHTGBM_BOOSTING_GBDT_H_
#define LIGHTGBM_BOOSTING_GBDT_H_

#include <LightGBM/boosting.h>
#include <LightGBM/objective_function.h>
#include <LightGBM/prediction_early_stop.h>

#include <string>
#include <algorithm>
#include <cstdio>
#include <fstream>
#include <iostream>  // for the std::cout diagnostics in MergeFrom()
#include <map>
#include <memory>
#include <mutex>
#include <unordered_map>
#include <utility>
#include <vector>

#include <LightGBM/json11.hpp>

#include "score_updater.hpp"

using namespace json11;

namespace LightGBM {

/*!
 * \brief GBDT algorithm implementation, including training, prediction, and bagging.
 */
class GBDT : public GBDTBase {
 public:
  /*!
   * \brief Constructor
   */
  GBDT();

  /*!
   * \brief Destructor
   */
  ~GBDT();

  /*!
   * \brief Initialization logic
   * \param gbdt_config Config for boosting
   * \param train_data Training data
   * \param objective_function Training objective function
   * \param training_metrics Training metrics
   */
  void Init(const Config* gbdt_config, const Dataset* train_data,
            const ObjectiveFunction* objective_function,
            const std::vector<const Metric*>& training_metrics) override;

  /*!
   * \brief Merge model from another boosting object; its trees are inserted at the front of the current boosting object
   * \param other
   */
  void MergeFrom(const Boosting* other) override {
    auto other_gbdt = reinterpret_cast<const GBDT*>(other);
    // temporarily move our own trees aside
    auto original_models = std::move(models_);
    models_ = std::vector<std::unique_ptr<Tree>>();
    int i = 0;
    // push models from the other object first
    for (const auto& tree : other_gbdt->models_) {
      auto new_tree = std::unique_ptr<Tree>(new Tree(*(tree.get())));
      models_.push_back(std::move(new_tree));
      i++;
    }
    std::cout << "from MergeFrom: other_tree_num =" << i << std::endl;
    i = 0;
    num_init_iteration_ = static_cast<int>(models_.size()) / num_tree_per_iteration_;
    // then push the models of the current object
    for (const auto& tree : original_models) {
      auto new_tree = std::unique_ptr<Tree>(new Tree(*(tree.get())));
      models_.push_back(std::move(new_tree));
      i++;
    }
    std::cout << "from MergeFrom: original_tree_num =" << i << std::endl;
    num_iteration_for_pred_ = static_cast<int>(models_.size()) / num_tree_per_iteration_;
  }

  void ShuffleModels(int start_iter, int end_iter) override {
    int total_iter = static_cast<int>(models_.size()) / num_tree_per_iteration_;
    start_iter = std::max(0, start_iter);
    if (end_iter <= 0) {
      end_iter = total_iter;
    }
    end_iter = std::min(total_iter, end_iter);
    auto original_models = std::move(models_);
    std::vector<int> indices(total_iter);
    for (int i = 0; i < total_iter; ++i) {
      indices[i] = i;
    }
    Random tmp_rand(17);
    for (int i = start_iter; i < end_iter - 1; ++i) {
      int j = tmp_rand.NextShort(i + 1, end_iter);
      std::swap(indices[i], indices[j]);
    }
    models_ = std::vector<std::unique_ptr<Tree>>();
    for (int i = 0; i < total_iter; ++i) {
      for (int j = 0; j < num_tree_per_iteration_; ++j) {
        int tree_idx = indices[i] * num_tree_per_iteration_ + j;
        auto new_tree = std::unique_ptr<Tree>(new Tree(*(original_models[tree_idx].get())));
        models_.push_back(std::move(new_tree));
      }
    }
  }

  /*!
   * \brief Reset the training data
   * \param train_data New Training data
   * \param objective_function Training objective function
   * \param training_metrics Training metrics
   */
  void ResetTrainingData(const Dataset* train_data,
                         const ObjectiveFunction* objective_function,
                         const std::vector<const Metric*>& training_metrics) override;

  /*!
   * \brief Reset Boosting Config
   * \param gbdt_config Config for boosting
   */
  void ResetConfig(const Config* gbdt_config) override;

  /*!
   * \brief Add a validation dataset
   * \param valid_data Validation dataset
   * \param valid_metrics Metrics for validation dataset
   */
  void AddValidDataset(const Dataset* valid_data,
                       const std::vector<const Metric*>& valid_metrics) override;

  /*!
   * \brief Perform a full training procedure
   * \param snapshot_freq frequency of snapshots
   * \param model_output_path path of model file
   */
  void Train(int snapshot_freq, const std::string& model_output_path) override;

  void MyTrain(int snapshot_freq, const std::string& model_output_path,
               const std::string& input_model_path, int num_iterations, int erase_pos) override;

  void RefitTree(const std::vector<std::vector<int>>& tree_leaf_prediction) override;

  /*!
   * \brief Training logic
   * \param gradients nullptr for using default objective, otherwise use self-defined boosting
   * \param hessians nullptr for using default objective, otherwise use self-defined boosting
   * \return True if cannot train any more
   */
  bool TrainOneIter(const score_t* gradients, const score_t* hessians) override;

  /*!
   * \brief Rollback one iteration
   */
  void RollbackOneIter() override;

  /*!
   * \brief Get current iteration
   */
  int GetCurrentIteration() const override {
    return static_cast<int>(models_.size()) / num_tree_per_iteration_;
  }

  /*!
   * \brief Can use early stopping for prediction or not
   * \return True if cannot use early stopping for prediction
   */
  bool NeedAccuratePrediction() const override {
    if (objective_function_ == nullptr) {
      return true;
    } else {
      return objective_function_->NeedAccuratePrediction();
    }
  }

  /*!
   * \brief Get evaluation result at data_idx data
   * \param data_idx 0: training data, 1: 1st validation data
   * \return evaluation result
   */
  std::vector<double> GetEvalAt(int data_idx) const override;

  /*!
   * \brief Get current training score
   * \param out_len length of returned score
   * \return training score
   */
  const double* GetTrainingScore(int64_t* out_len) override;

  /*!
   * \brief Get size of prediction at data_idx data
   * \param data_idx 0: training data, 1: 1st validation data
   * \return The size of prediction
   */
  int64_t GetNumPredictAt(int data_idx) const override {
    CHECK(data_idx >= 0 && data_idx <= static_cast<int>(valid_score_updater_.size()));
    data_size_t num_data = train_data_->num_data();
    if (data_idx > 0) {
      num_data = valid_score_updater_[data_idx - 1]->num_data();
    }
    return num_data * num_class_;
  }

  /*!
   * \brief Get prediction result at data_idx data
   * \param data_idx 0: training data, 1: 1st validation data
   * \param out_result used to store prediction result; memory should be allocated before calling this function
   * \param out_len length of returned score
   */
  void GetPredictAt(int data_idx, double* out_result, int64_t* out_len) override;

  /*!
   * \brief Get number of predictions for one data row
   * \param num_iteration number of used iterations
   * \param is_pred_leaf True if predicting leaf index
   * \param is_pred_contrib True if predicting feature contribution
   * \return number of predictions
   */
  inline int NumPredictOneRow(int num_iteration, bool is_pred_leaf, bool is_pred_contrib) const override {
    int num_preb_in_one_row = num_class_;
    if (is_pred_leaf) {
      int max_iteration = GetCurrentIteration();
      if (num_iteration > 0) {
        num_preb_in_one_row *= static_cast<int>(std::min(max_iteration, num_iteration));
      } else {
        num_preb_in_one_row *= max_iteration;
      }
    } else if (is_pred_contrib) {
      num_preb_in_one_row = num_tree_per_iteration_ * (max_feature_idx_ + 2);  // +1 for 0-based indexing, +1 for baseline
    }
    return num_preb_in_one_row;
  }

  void PredictRaw(const double* features, double* output,
                  const PredictionEarlyStopInstance* earlyStop) const override;

  void PredictRawByMap(const std::unordered_map<int, double>& features, double* output,
                       const PredictionEarlyStopInstance* early_stop) const override;

  void Predict(const double* features, double* output,
               const PredictionEarlyStopInstance* earlyStop) const override;

  void PredictByMap(const std::unordered_map<int, double>& features, double* output,
                    const PredictionEarlyStopInstance* early_stop) const override;

  void PredictLeafIndex(const double* features, double* output) const override;

  void PredictLeafIndexByMap(const std::unordered_map<int, double>& features, double* output) const override;

  void PredictContrib(const double* features, double* output,
                      const PredictionEarlyStopInstance* earlyStop) const override;

  /*!
   * \brief Dump model to json format string
   * \param start_iteration The iteration that the dumped model starts from
   * \param num_iteration Number of iterations to dump, -1 means dump all
   * \return Json format string of model
   */
  std::string DumpModel(int start_iteration, int num_iteration) const override;

  /*!
   * \brief Translate model to if-else statement
   * \param num_iteration Number of iterations to translate, -1 means translate all
   * \return if-else format codes of model
   */
  std::string ModelToIfElse(int num_iteration) const override;

  /*!
   * \brief Translate model to if-else statement
   * \param num_iteration Number of iterations to translate, -1 means translate all
   * \param filename Filename to save to
   * \return is_finish Is training finished or not
   */
  bool SaveModelToIfElse(int num_iteration, const char* filename) const override;

  /*!
   * \brief Save model to file
   * \param start_iteration The iteration that the saved model starts from
   * \param num_iterations Number of iterations to save, -1 means save all
   * \param filename Filename to save to
   * \return is_finish Is training finished or not
   */
  bool SaveModelToFile(int start_iteration, int num_iterations, const char* filename) const override;

  /*!
   * \brief Save model to string
   * \param start_iteration The iteration that the saved model starts from
   * \param num_iterations Number of iterations to save, -1 means save all
   * \return Non-empty string if succeeded
   */
  std::string SaveModelToString(int start_iteration, int num_iterations) const override;

  /*!
   * \brief Restore from a serialized buffer
   */
  bool LoadModelFromString(const char* buffer, size_t len) override;

  /*!
   * \brief Calculate feature importances
   * \param num_iteration Number of iterations to use for feature importance, -1 means use all
   * \param importance_type: 0 for split, 1 for gain
   * \return vector of feature_importance
   */
  std::vector<double> FeatureImportance(int num_iteration, int importance_type) const override;

  /*!
   * \brief Get max feature index of this model
   * \return Max feature index of this model
   */
  inline int MaxFeatureIdx() const override { return max_feature_idx_; }

  /*!
   * \brief Get feature names of this model
   * \return Feature names of this model
   */
  inline std::vector<std::string> FeatureNames() const override { return feature_names_; }

  /*!
   * \brief Get index of label column
   * \return index of label column
   */
  inline int LabelIdx() const override { return label_idx_; }

  /*!
   * \brief Get number of weak sub-models
   * \return Number of weak sub-models
   */
  inline int NumberOfTotalModel() const override { return static_cast<int>(models_.size()); }

  /*!
   * \brief Get number of trees per iteration
   * \return number of trees per iteration
   */
  inline int NumModelPerIteration() const override { return num_tree_per_iteration_; }

  /*!
   * \brief Get number of classes
   * \return Number of classes
   */
  inline int NumberOfClasses() const override { return num_class_; }

  inline void InitPredict(int num_iteration, bool is_pred_contrib) override {
    num_iteration_for_pred_ = static_cast<int>(models_.size()) / num_tree_per_iteration_;
    if (num_iteration > 0) {
      num_iteration_for_pred_ = std::min(num_iteration, num_iteration_for_pred_);
    }
    if (is_pred_contrib) {
      #pragma omp parallel for schedule(static)
      for (int i = 0; i < static_cast<int>(models_.size()); ++i) {
        models_[i]->RecomputeMaxDepth();
      }
    }
  }

  inline double GetLeafValue(int tree_idx, int leaf_idx) const override {
    CHECK(tree_idx >= 0 && static_cast<size_t>(tree_idx) < models_.size());
    CHECK(leaf_idx >= 0 && leaf_idx < models_[tree_idx]->num_leaves());
    return models_[tree_idx]->LeafOutput(leaf_idx);
  }

  inline void SetLeafValue(int tree_idx, int leaf_idx, double val) override {
    CHECK(tree_idx >= 0 && static_cast<size_t>(tree_idx) < models_.size());
    CHECK(leaf_idx >= 0 && leaf_idx < models_[tree_idx]->num_leaves());
    models_[tree_idx]->SetLeafOutput(leaf_idx, val);
  }

  /*!
   * \brief Get type name of this boosting object
   */
  const char* SubModelName() const override { return "tree"; }

 protected:
  /*!
   * \brief Print eval result and check early stopping
   */
  virtual bool EvalAndCheckEarlyStopping();

  /*!
   * \brief reset config for bagging
   */
  void ResetBaggingConfig(const Config* config, bool is_change_dataset);

  /*!
   * \brief Implement bagging logic
   * \param iter Current iteration
   */
  virtual void Bagging(int iter);

  /*!
   * \brief Helper function for bagging, used for multi-threading optimization
   * \param start start index of bagging
   * \param cnt count
   * \param buffer output buffer
   * \return count of left size
   */
  data_size_t BaggingHelper(Random* cur_rand, data_size_t start,
                            data_size_t cnt, data_size_t* buffer);

  /*!
   * \brief Helper function for bagging, used for multi-threading optimization, balanced sampling
   * \param start start index of bagging
   * \param cnt count
   * \param buffer output buffer
   * \return count of left size
   */
  data_size_t BalancedBaggingHelper(Random* cur_rand, data_size_t start,
                                    data_size_t cnt, data_size_t* buffer);

  /*!
   * \brief calculate the objective function
   */
  virtual void Boosting();

  /*!
   * \brief updating score after tree was trained
   * \param tree Trained tree of this iteration
   * \param cur_tree_id Current tree for multiclass training
   */
  virtual void UpdateScore(const Tree* tree, const int cur_tree_id);

  /*!
   * \brief eval results for one metric
   */
  virtual std::vector<double> EvalOneMetric(const Metric* metric, const double* score) const;

  /*!
   * \brief Print metric result of current iteration
   * \param iter Current iteration
   * \return best_msg if met early_stopping
   */
  std::string OutputMetric(int iter);

  double BoostFromAverage(int class_id, bool update_scorer);

  /*! \brief current iteration */
  int iter_;
  /*! \brief Pointer to training data */
  const Dataset* train_data_;
  /*! \brief Config of gbdt */
  std::unique_ptr<Config> config_;
  /*! \brief Tree learner, will use this class to learn trees */
  std::unique_ptr<TreeLearner> tree_learner_;
  /*! \brief Objective function */
  const ObjectiveFunction* objective_function_;
  /*! \brief Store and update training data's score */
  std::unique_ptr<ScoreUpdater> train_score_updater_;
  /*! \brief Metrics for training data */
  std::vector<const Metric*> training_metrics_;
  /*! \brief Store and update validation data's scores */
  std::vector<std::unique_ptr<ScoreUpdater>> valid_score_updater_;
  /*! \brief Metric for validation data */
  std::vector<std::vector<const Metric*>> valid_metrics_;
  /*! \brief Number of rounds for early stopping */
  int early_stopping_round_;
  /*! \brief Only use first metric for early stopping */
  bool es_first_metric_only_;
  /*! \brief Best iteration(s) for early stopping */
  std::vector<std::vector<int>> best_iter_;
  /*! \brief Best score(s) for early stopping */
  std::vector<std::vector<double>> best_score_;
  /*! \brief output message of best iteration */
  std::vector<std::vector<std::string>> best_msg_;
  /*! \brief Trained models(trees) */
  std::vector<std::unique_ptr<Tree>> models_;
  /*! \brief Max feature index of training data */
  int max_feature_idx_;
  /*! \brief First order derivative of training data */
  std::vector<score_t> gradients_;
  /*! \brief Second order derivative of training data */
  std::vector<score_t> hessians_;
  /*! \brief Store the indices of in-bag data */
  std::vector<data_size_t> bag_data_indices_;
  /*! \brief Number of in-bag data */
  data_size_t bag_data_cnt_;
  /*! \brief Store the indices of in-bag data */
  std::vector<data_size_t> tmp_indices_;
  /*! \brief Number of training data */
  data_size_t num_data_;
  /*! \brief Number of trees per iteration */
  int num_tree_per_iteration_;
  /*! \brief Number of classes */
  int num_class_;
  /*! \brief Index of label column */
  data_size_t label_idx_;
  /*! \brief number of used model */
  int num_iteration_for_pred_;
  /*! \brief Shrinkage rate for one iteration */
  double shrinkage_rate_;
  /*! \brief Number of loaded initial models */
  int num_init_iteration_;
  /*! \brief Feature names */
  std::vector<std::string> feature_names_;
  std::vector<std::string> feature_infos_;
  /*! \brief number of threads */
  int num_threads_;
  /*! \brief Buffer for multi-threading bagging */
  std::vector<data_size_t> offsets_buf_;
  /*! \brief Buffer for multi-threading bagging */
  std::vector<data_size_t> left_cnts_buf_;
  /*! \brief Buffer for multi-threading bagging */
  std::vector<data_size_t> right_cnts_buf_;
  /*! \brief Buffer for multi-threading bagging */
  std::vector<data_size_t> left_write_pos_buf_;
  /*! \brief Buffer for multi-threading bagging */
  std::vector<data_size_t> right_write_pos_buf_;

  std::unique_ptr<Dataset> tmp_subset_;
  bool is_use_subset_;
  std::vector<bool> class_need_train_;
  bool is_constant_hessian_;
  std::unique_ptr<ObjectiveFunction> loaded_objective_;
  bool average_output_;
  bool need_re_bagging_;
  bool balanced_bagging_;
  std::string loaded_parameter_;
  std::vector<int8_t> monotone_constraints_;
  Json forced_splits_json_;
};

}  // namespace LightGBM
#endif  // LIGHTGBM_BOOSTING_GBDT_H_
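ShuffleModels() above is a Fisher-Yates-style shuffle over iteration indices restricted to [start_iter, end_iter), after which each iteration drags its num_tree_per_iteration_ trees along as a unit. A standalone sketch of that index permutation, with std::mt19937 standing in for LightGBM's Random and assuming NextShort's upper bound is exclusive:

#include <algorithm>
#include <iostream>
#include <random>
#include <vector>

int main()
{
    const int total_iter = 10, start_iter = 2, end_iter = 8;  // hypothetical values
    std::vector<int> indices(total_iter);
    for (int i = 0; i < total_iter; ++i) indices[i] = i;

    std::mt19937 rng(17);  // stands in for Random(17)
    for (int i = start_iter; i < end_iter - 1; ++i) {
        // Mirrors tmp_rand.NextShort(i + 1, end_iter): pick a partner strictly
        // after position i but before end_iter, then swap.
        std::uniform_int_distribution<int> dist(i + 1, end_iter - 1);
        std::swap(indices[i], indices[dist(rng)]);
    }
    // Iterations outside [start_iter, end_iter) keep their original positions.
    for (int v : indices) std::cout << v << ' ';
    std::cout << '\n';
    return 0;
}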
ParallelJobsOpenMP.h
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

#ifndef ParallelJobsOpenMP_h
#define ParallelJobsOpenMP_h

#if ENABLE(THREADING_OPENMP)

#include <omp.h>

namespace WTF {

class ParallelEnvironment {
    WTF_MAKE_NONCOPYABLE(ParallelEnvironment);
public:
    typedef void (*ThreadFunction)(void*);

    ParallelEnvironment(ThreadFunction threadFunction, size_t sizeOfParameter, int requestedJobNumber) :
        m_threadFunction(threadFunction),
        m_sizeOfParameter(sizeOfParameter)
    {
        int maxNumberOfThreads = omp_get_max_threads();

        if (!requestedJobNumber || requestedJobNumber > maxNumberOfThreads)
            requestedJobNumber = maxNumberOfThreads;

        ASSERT(requestedJobNumber > 0);

        m_numberOfJobs = requestedJobNumber;
    }

    int numberOfJobs()
    {
        return m_numberOfJobs;
    }

    void execute(unsigned char* parameters)
    {
        omp_set_num_threads(m_numberOfJobs);

#pragma omp parallel for
        for (int i = 0; i < m_numberOfJobs; ++i)
            (*m_threadFunction)(parameters + i * m_sizeOfParameter);
    }

private:
    ThreadFunction m_threadFunction;
    size_t m_sizeOfParameter;
    int m_numberOfJobs;
};

} // namespace WTF

#endif // ENABLE(THREADING_OPENMP)

#endif // ParallelJobsOpenMP_h
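ParallelEnvironment::execute() treats `parameters` as an array of m_numberOfJobs parameter blocks of m_sizeOfParameter bytes each and hands block i to loop iteration i. A self-contained sketch of the same dispatch pattern (JobParam and threadFunction are illustrative names, not WTF API):

#include <cstdio>
#include <omp.h>

struct JobParam { int first; int last; };  // one fixed-size block per job

static void threadFunction(void* p)
{
    JobParam* param = static_cast<JobParam*>(p);
    std::printf("thread %d handles [%d, %d)\n",
                omp_get_thread_num(), param->first, param->last);
}

int main()
{
    const int jobs = 4;
    JobParam params[jobs] = {{0, 25}, {25, 50}, {50, 75}, {75, 100}};
    unsigned char* raw = reinterpret_cast<unsigned char*>(params);

    omp_set_num_threads(jobs);
    // Same shape as ParallelEnvironment::execute(): one iteration per job,
    // each receiving its own sizeof(JobParam)-byte slice of the buffer.
    #pragma omp parallel for
    for (int i = 0; i < jobs; ++i)
        threadFunction(raw + i * sizeof(JobParam));
    return 0;
}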
openmp_4.tfm.c
void foo(int N, double *A) {
  int J;
#pragma omp parallel default(shared)
  {
#pragma omp for private(J)
    for (int I = 0; I < N; ++I) {
      /* J is assigned in every iteration, so it must be privatized per thread. */
      J = N + I;
      A[I] = I + J;
    }
  }
}
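For comparison, a sketch of the idiom this kind of transformed test usually starts from: declaring J inside the loop body makes it private without any clause (foo_scoped is a hypothetical name):

void foo_scoped(int N, double *A) {
#pragma omp parallel for
  for (int I = 0; I < N; ++I) {
    int J = N + I; /* block scope makes J private to each iteration */
    A[I] = I + J;
  }
}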
convolutiondepthwise_5x5_pack4_bf16s.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void convdw5x5s1_pack4_bf16s_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { #if __aarch64__ const int w = bottom_blob.w; #endif const int outw = top_blob.w; const int outh = top_blob.h; const int group = bottom_blob.c; const float* bias = _bias; #pragma omp parallel for num_threads(opt.num_threads) for (int g = 0; g < group; g++) { Mat out = top_blob.channel(g); const unsigned short* kptr = kernel.row<const unsigned short>(g); unsigned short* outptr0 = out.row<unsigned short>(0); const Mat img0 = bottom_blob.channel(g); const unsigned short* r0 = img0.row<const unsigned short>(0); const unsigned short* r1 = img0.row<const unsigned short>(1); const unsigned short* r2 = img0.row<const unsigned short>(2); const unsigned short* r3 = img0.row<const unsigned short>(3); const unsigned short* r4 = img0.row<const unsigned short>(4); #if __aarch64__ unsigned short* outptr1 = out.row<unsigned short>(1); const unsigned short* r5 = img0.row<const unsigned short>(5); float32x4_t _bias0 = bias ? vld1q_f32((const float*)bias + g * 4) : vdupq_n_f32(0.f); // 4 * 25 uint16x8_t _k00_01 = vld1q_u16(kptr); uint16x8_t _k02_03 = vld1q_u16(kptr + 8); uint16x8_t _k04_10 = vld1q_u16(kptr + 16); uint16x8_t _k11_12 = vld1q_u16(kptr + 24); uint16x8_t _k13_14 = vld1q_u16(kptr + 32); uint16x8_t _k20_21 = vld1q_u16(kptr + 40); uint16x8_t _k22_23 = vld1q_u16(kptr + 48); uint16x8_t _k24_30 = vld1q_u16(kptr + 56); uint16x8_t _k31_32 = vld1q_u16(kptr + 64); uint16x8_t _k33_34 = vld1q_u16(kptr + 72); uint16x8_t _k40_41 = vld1q_u16(kptr + 80); uint16x8_t _k42_43 = vld1q_u16(kptr + 88); uint16x4_t _k44 = vld1_u16(kptr + 96); #else // __aarch64__ float bias0_data[4]; if (bias) { bias0_data[0] = bias[g * 4 + 0]; bias0_data[1] = bias[g * 4 + 1]; bias0_data[2] = bias[g * 4 + 2]; bias0_data[3] = bias[g * 4 + 3]; } else { bias0_data[0] = 0.f; bias0_data[1] = 0.f; bias0_data[2] = 0.f; bias0_data[3] = 0.f; } const float* bias0_data_ptr = bias0_data; #endif // __aarch64__ int i = 0; #if __aarch64__ for (; i + 1 < outh; i += 2) { int j = 0; for (; j + 3 < outw; j += 4) { asm volatile( "prfm pldl1keep, [%3, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%3], #32 \n" // r10 r11 r12 r13 "shll2 v14.4s, %18.8h, #16 \n" "mov v24.16b, %29.16b \n" // sum00 "mov v25.16b, %29.16b \n" // sum01 "mov v26.16b, %29.16b \n" // sum02 "mov v27.16b, %29.16b \n" // sum03 "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "mov v28.16b, %29.16b \n" // sum10 "mov v29.16b, %29.16b \n" // sum11 "mov v30.16b, %29.16b \n" // sum12 "mov v31.16b, %29.16b \n" // sum13 "shll v15.4s, %16.4h, #16 \n" "fmla v24.4s, v14.4s, v16.4s \n" "shll v18.4s, v18.4h, #16 \n" "fmla v25.4s, v14.4s, v17.4s \n" "shll v19.4s, v19.4h, #16 \n" "fmla v26.4s, v14.4s, v18.4s \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v20.4h, 
v21.4h, v22.4h, v23.4h}, [%3] \n" // r14 r15 r16 r17 "fmla v27.4s, v14.4s, v19.4s \n" "shll v14.4s, %19.4h, #16 \n" "fmla v28.4s, v15.4s, v16.4s \n" "fmla v29.4s, v15.4s, v17.4s \n" "fmla v30.4s, v15.4s, v18.4s \n" "fmla v31.4s, v15.4s, v19.4s \n" "shll2 v15.4s, %16.8h, #16 \n" "fmla v24.4s, v14.4s, v17.4s \n" "fmla v25.4s, v14.4s, v18.4s \n" "shll v20.4s, v20.4h, #16 \n" "fmla v26.4s, v14.4s, v19.4s \n" "fmla v27.4s, v14.4s, v20.4s \n" "shll2 v14.4s, %19.8h, #16 \n" "fmla v28.4s, v15.4s, v17.4s \n" "fmla v29.4s, v15.4s, v18.4s \n" "fmla v30.4s, v15.4s, v19.4s \n" "fmla v31.4s, v15.4s, v20.4s \n" "shll v15.4s, %17.4h, #16 \n" "fmla v24.4s, v14.4s, v18.4s \n" "fmla v25.4s, v14.4s, v19.4s \n" "shll v21.4s, v21.4h, #16 \n" "fmla v26.4s, v14.4s, v20.4s \n" "fmla v27.4s, v14.4s, v21.4s \n" "shll v14.4s, %20.4h, #16 \n" "fmla v28.4s, v15.4s, v18.4s \n" "fmla v29.4s, v15.4s, v19.4s \n" "fmla v30.4s, v15.4s, v20.4s \n" "fmla v31.4s, v15.4s, v21.4s \n" "shll2 v15.4s, %17.8h, #16 \n" "fmla v24.4s, v14.4s, v19.4s \n" "fmla v25.4s, v14.4s, v20.4s \n" "shll v22.4s, v22.4h, #16 \n" "fmla v26.4s, v14.4s, v21.4s \n" "fmla v27.4s, v14.4s, v22.4s \n" "shll2 v14.4s, %20.8h, #16 \n" "fmla v28.4s, v15.4s, v19.4s \n" "fmla v29.4s, v15.4s, v20.4s \n" "fmla v30.4s, v15.4s, v21.4s \n" "prfm pldl1keep, [%4, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%4], #32 \n" // r20 r21 r22 r23 "fmla v31.4s, v15.4s, v22.4s \n" "shll v15.4s, %18.4h, #16 \n" "fmla v24.4s, v14.4s, v20.4s \n" "fmla v25.4s, v14.4s, v21.4s \n" "shll v23.4s, v23.4h, #16 \n" "fmla v26.4s, v14.4s, v22.4s \n" "fmla v27.4s, v14.4s, v23.4s \n" "shll v14.4s, %21.4h, #16 \n" "fmla v28.4s, v15.4s, v20.4s \n" "fmla v29.4s, v15.4s, v21.4s \n" "shll v16.4s, v16.4h, #16 \n" "fmla v30.4s, v15.4s, v22.4s \n" "shll v17.4s, v17.4h, #16 \n" "fmla v31.4s, v15.4s, v23.4s \n" "shll2 v15.4s, %18.8h, #16 \n" "fmla v24.4s, v14.4s, v16.4s \n" "shll v18.4s, v18.4h, #16 \n" "fmla v25.4s, v14.4s, v17.4s \n" "shll v19.4s, v19.4h, #16 \n" "fmla v26.4s, v14.4s, v18.4s \n" "prfm pldl1keep, [%4, #256] \n" "ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%4] \n" // r24 r25 r26 r27 "fmla v27.4s, v14.4s, v19.4s \n" "shll2 v14.4s, %21.8h, #16 \n" "fmla v28.4s, v15.4s, v16.4s \n" "fmla v29.4s, v15.4s, v17.4s \n" "fmla v30.4s, v15.4s, v18.4s \n" "fmla v31.4s, v15.4s, v19.4s \n" "shll v15.4s, %19.4h, #16 \n" "fmla v24.4s, v14.4s, v17.4s \n" "fmla v25.4s, v14.4s, v18.4s \n" "shll v20.4s, v20.4h, #16 \n" "fmla v26.4s, v14.4s, v19.4s \n" "fmla v27.4s, v14.4s, v20.4s \n" "shll v14.4s, %22.4h, #16 \n" "fmla v28.4s, v15.4s, v17.4s \n" "fmla v29.4s, v15.4s, v18.4s \n" "fmla v30.4s, v15.4s, v19.4s \n" "fmla v31.4s, v15.4s, v20.4s \n" "shll2 v15.4s, %19.8h, #16 \n" "fmla v24.4s, v14.4s, v18.4s \n" "fmla v25.4s, v14.4s, v19.4s \n" "shll v21.4s, v21.4h, #16 \n" "fmla v26.4s, v14.4s, v20.4s \n" "fmla v27.4s, v14.4s, v21.4s \n" "shll2 v14.4s, %22.8h, #16 \n" "fmla v28.4s, v15.4s, v18.4s \n" "fmla v29.4s, v15.4s, v19.4s \n" "fmla v30.4s, v15.4s, v20.4s \n" "fmla v31.4s, v15.4s, v21.4s \n" "shll v15.4s, %20.4h, #16 \n" "fmla v24.4s, v14.4s, v19.4s \n" "fmla v25.4s, v14.4s, v20.4s \n" "shll v22.4s, v22.4h, #16 \n" "fmla v26.4s, v14.4s, v21.4s \n" "fmla v27.4s, v14.4s, v22.4s \n" "shll v14.4s, %23.4h, #16 \n" "fmla v28.4s, v15.4s, v19.4s \n" "fmla v29.4s, v15.4s, v20.4s \n" "fmla v30.4s, v15.4s, v21.4s \n" "prfm pldl1keep, [%5, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%5], #32 \n" // r30 r31 r32 r33 "fmla v31.4s, v15.4s, v22.4s \n" "shll2 v15.4s, %20.8h, #16 \n" "fmla v24.4s, v14.4s, v20.4s \n" "fmla 
v25.4s, v14.4s, v21.4s \n" "shll v23.4s, v23.4h, #16 \n" "fmla v26.4s, v14.4s, v22.4s \n" "fmla v27.4s, v14.4s, v23.4s \n" "shll2 v14.4s, %23.8h, #16 \n" "fmla v28.4s, v15.4s, v20.4s \n" "fmla v29.4s, v15.4s, v21.4s \n" "shll v16.4s, v16.4h, #16 \n" "fmla v30.4s, v15.4s, v22.4s \n" "shll v17.4s, v17.4h, #16 \n" "fmla v31.4s, v15.4s, v23.4s \n" "shll v15.4s, %21.4h, #16 \n" "fmla v24.4s, v14.4s, v16.4s \n" "shll v18.4s, v18.4h, #16 \n" "fmla v25.4s, v14.4s, v17.4s \n" "shll v19.4s, v19.4h, #16 \n" "fmla v26.4s, v14.4s, v18.4s \n" "prfm pldl1keep, [%5, #256] \n" "ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%5] \n" // r34 r35 r36 r37 "fmla v27.4s, v14.4s, v19.4s \n" "shll v14.4s, %24.4h, #16 \n" "fmla v28.4s, v15.4s, v16.4s \n" "fmla v29.4s, v15.4s, v17.4s \n" "fmla v30.4s, v15.4s, v18.4s \n" "fmla v31.4s, v15.4s, v19.4s \n" "shll2 v15.4s, %21.8h, #16 \n" "fmla v24.4s, v14.4s, v17.4s \n" "fmla v25.4s, v14.4s, v18.4s \n" "shll v20.4s, v20.4h, #16 \n" "fmla v26.4s, v14.4s, v19.4s \n" "fmla v27.4s, v14.4s, v20.4s \n" "shll2 v14.4s, %24.8h, #16 \n" "fmla v28.4s, v15.4s, v17.4s \n" "fmla v29.4s, v15.4s, v18.4s \n" "fmla v30.4s, v15.4s, v19.4s \n" "fmla v31.4s, v15.4s, v20.4s \n" "shll v15.4s, %22.4h, #16 \n" "fmla v24.4s, v14.4s, v18.4s \n" "fmla v25.4s, v14.4s, v19.4s \n" "shll v21.4s, v21.4h, #16 \n" "fmla v26.4s, v14.4s, v20.4s \n" "fmla v27.4s, v14.4s, v21.4s \n" "shll v14.4s, %25.4h, #16 \n" "fmla v28.4s, v15.4s, v18.4s \n" "fmla v29.4s, v15.4s, v19.4s \n" "fmla v30.4s, v15.4s, v20.4s \n" "fmla v31.4s, v15.4s, v21.4s \n" "shll2 v15.4s, %22.8h, #16 \n" "fmla v24.4s, v14.4s, v19.4s \n" "fmla v25.4s, v14.4s, v20.4s \n" "shll v22.4s, v22.4h, #16 \n" "fmla v26.4s, v14.4s, v21.4s \n" "fmla v27.4s, v14.4s, v22.4s \n" "shll2 v14.4s, %25.8h, #16 \n" "fmla v28.4s, v15.4s, v19.4s \n" "fmla v29.4s, v15.4s, v20.4s \n" "fmla v30.4s, v15.4s, v21.4s \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n" // r40 r41 r42 r43 "fmla v31.4s, v15.4s, v22.4s \n" "shll v15.4s, %23.4h, #16 \n" "fmla v24.4s, v14.4s, v20.4s \n" "fmla v25.4s, v14.4s, v21.4s \n" "shll v23.4s, v23.4h, #16 \n" "fmla v26.4s, v14.4s, v22.4s \n" "fmla v27.4s, v14.4s, v23.4s \n" "shll v14.4s, %26.4h, #16 \n" "fmla v28.4s, v15.4s, v20.4s \n" "fmla v29.4s, v15.4s, v21.4s \n" "shll v16.4s, v16.4h, #16 \n" "fmla v30.4s, v15.4s, v22.4s \n" "shll v17.4s, v17.4h, #16 \n" "fmla v31.4s, v15.4s, v23.4s \n" "shll2 v15.4s, %23.8h, #16 \n" "fmla v24.4s, v14.4s, v16.4s \n" "shll v18.4s, v18.4h, #16 \n" "fmla v25.4s, v14.4s, v17.4s \n" "shll v19.4s, v19.4h, #16 \n" "fmla v26.4s, v14.4s, v18.4s \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%6] \n" // r44 r45 r46 r47 "fmla v27.4s, v14.4s, v19.4s \n" "shll2 v14.4s, %26.8h, #16 \n" "fmla v28.4s, v15.4s, v16.4s \n" "fmla v29.4s, v15.4s, v17.4s \n" "fmla v30.4s, v15.4s, v18.4s \n" "fmla v31.4s, v15.4s, v19.4s \n" "shll v15.4s, %24.4h, #16 \n" "fmla v24.4s, v14.4s, v17.4s \n" "fmla v25.4s, v14.4s, v18.4s \n" "shll v20.4s, v20.4h, #16 \n" "fmla v26.4s, v14.4s, v19.4s \n" "fmla v27.4s, v14.4s, v20.4s \n" "shll v14.4s, %27.4h, #16 \n" "fmla v28.4s, v15.4s, v17.4s \n" "fmla v29.4s, v15.4s, v18.4s \n" "fmla v30.4s, v15.4s, v19.4s \n" "fmla v31.4s, v15.4s, v20.4s \n" "shll2 v15.4s, %24.8h, #16 \n" "fmla v24.4s, v14.4s, v18.4s \n" "fmla v25.4s, v14.4s, v19.4s \n" "shll v21.4s, v21.4h, #16 \n" "fmla v26.4s, v14.4s, v20.4s \n" "fmla v27.4s, v14.4s, v21.4s \n" "shll2 v14.4s, %27.8h, #16 \n" "fmla v28.4s, v15.4s, v18.4s \n" "fmla v29.4s, v15.4s, v19.4s \n" "fmla 
v30.4s, v15.4s, v20.4s \n" "fmla v31.4s, v15.4s, v21.4s \n" "shll v15.4s, %25.4h, #16 \n" "fmla v24.4s, v14.4s, v19.4s \n" "fmla v25.4s, v14.4s, v20.4s \n" "shll v22.4s, v22.4h, #16 \n" "fmla v26.4s, v14.4s, v21.4s \n" "fmla v27.4s, v14.4s, v22.4s \n" "shll v14.4s, %28.4h, #16 \n" "fmla v28.4s, v15.4s, v19.4s \n" "fmla v29.4s, v15.4s, v20.4s \n" "fmla v30.4s, v15.4s, v21.4s \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%2], #32 \n" // r00 r01 r02 r03 "fmla v31.4s, v15.4s, v22.4s \n" "shll2 v15.4s, %25.8h, #16 \n" "fmla v24.4s, v14.4s, v20.4s \n" "fmla v25.4s, v14.4s, v21.4s \n" "shll v23.4s, v23.4h, #16 \n" "fmla v26.4s, v14.4s, v22.4s \n" "fmla v27.4s, v14.4s, v23.4s \n" "shll v14.4s, %16.4h, #16 \n" "fmla v28.4s, v15.4s, v20.4s \n" "shll v16.4s, v16.4h, #16 \n" "fmla v29.4s, v15.4s, v21.4s \n" "fmla v30.4s, v15.4s, v22.4s \n" "shll v17.4s, v17.4h, #16 \n" "fmla v31.4s, v15.4s, v23.4s \n" "shll2 v15.4s, %16.8h, #16 \n" "fmla v24.4s, v14.4s, v16.4s \n" "shll v18.4s, v18.4h, #16 \n" "fmla v25.4s, v14.4s, v17.4s \n" "shll v19.4s, v19.4h, #16 \n" "fmla v26.4s, v14.4s, v18.4s \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%2] \n" // r04 r05 r06 r07 "fmla v27.4s, v14.4s, v19.4s \n" "shll v14.4s, %17.4h, #16 \n" "fmla v24.4s, v15.4s, v17.4s \n" "fmla v25.4s, v15.4s, v18.4s \n" "shll v20.4s, v20.4h, #16 \n" "fmla v26.4s, v15.4s, v19.4s \n" "fmla v27.4s, v15.4s, v20.4s \n" "shll2 v15.4s, %17.8h, #16 \n" "fmla v24.4s, v14.4s, v18.4s \n" "fmla v25.4s, v14.4s, v19.4s \n" "shll v21.4s, v21.4h, #16 \n" "fmla v26.4s, v14.4s, v20.4s \n" "fmla v27.4s, v14.4s, v21.4s \n" "shll v14.4s, %18.4h, #16 \n" "fmla v24.4s, v15.4s, v19.4s \n" "fmla v25.4s, v15.4s, v20.4s \n" "shll v22.4s, v22.4h, #16 \n" "fmla v26.4s, v15.4s, v21.4s \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n" // r50 r51 r52 r53 "fmla v27.4s, v15.4s, v22.4s \n" "shll v15.4s, %26.4h, #16 \n" "fmla v24.4s, v14.4s, v20.4s \n" "shll v16.4s, v16.4h, #16 \n" "fmla v25.4s, v14.4s, v21.4s \n" "shll v23.4s, v23.4h, #16 \n" "fmla v26.4s, v14.4s, v22.4s \n" "shll v17.4s, v17.4h, #16 \n" "fmla v27.4s, v14.4s, v23.4s \n" "shll2 v14.4s, %26.8h, #16 \n" "fmla v28.4s, v15.4s, v16.4s \n" "shll v18.4s, v18.4h, #16 \n" "fmla v29.4s, v15.4s, v17.4s \n" "shll v19.4s, v19.4h, #16 \n" "fmla v30.4s, v15.4s, v18.4s \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%7] \n" // r54 r55 r56 r57 "fmla v31.4s, v15.4s, v19.4s \n" "shll v15.4s, %27.4h, #16 \n" "fmla v28.4s, v14.4s, v17.4s \n" "fmla v29.4s, v14.4s, v18.4s \n" "shll v20.4s, v20.4h, #16 \n" "fmla v30.4s, v14.4s, v19.4s \n" "fmla v31.4s, v14.4s, v20.4s \n" "shll2 v14.4s, %27.8h, #16 \n" "fmla v28.4s, v15.4s, v18.4s \n" "fmla v29.4s, v15.4s, v19.4s \n" "shll v21.4s, v21.4h, #16 \n" "fmla v30.4s, v15.4s, v20.4s \n" "fmla v31.4s, v15.4s, v21.4s \n" "shll v15.4s, %28.4h, #16 \n" "fmla v28.4s, v14.4s, v19.4s \n" "fmla v29.4s, v14.4s, v20.4s \n" "shll v22.4s, v22.4h, #16 \n" "fmla v30.4s, v14.4s, v21.4s \n" "fmla v31.4s, v14.4s, v22.4s \n" "fmla v28.4s, v15.4s, v20.4s \n" "fmla v29.4s, v15.4s, v21.4s \n" "shll v23.4s, v23.4h, #16 \n" "fmla v30.4s, v15.4s, v22.4s \n" "fmla v31.4s, v15.4s, v23.4s \n" "shrn v24.4h, v24.4s, #16 \n" "shrn v25.4h, v25.4s, #16 \n" "shrn v26.4h, v26.4s, #16 \n" "shrn v27.4h, v27.4s, #16 \n" "shrn v28.4h, v28.4s, #16 \n" "shrn v29.4h, v29.4s, #16 \n" "shrn v30.4h, v30.4s, #16 \n" "shrn v31.4h, v31.4s, #16 \n" "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%0], #32 \n" "st1 
{v28.4h, v29.4h, v30.4h, v31.4h}, [%1], #32 \n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(r3), // %5 "=r"(r4), // %6 "=r"(r5) // %7 : "0"(outptr0), "1"(outptr1), "2"(r0), "3"(r1), "4"(r2), "5"(r3), "6"(r4), "7"(r5), "w"(_k00_01), // %16 "w"(_k02_03), // %17 "w"(_k04_10), // %18 "w"(_k11_12), // %19 "w"(_k13_14), // %20 "w"(_k20_21), // %21 "w"(_k22_23), // %22 "w"(_k24_30), // %23 "w"(_k31_32), // %24 "w"(_k33_34), // %25 "w"(_k40_41), // %26 "w"(_k42_43), // %27 "w"(_k44), // %28 "w"(_bias0) // %29 : "memory", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; j + 1 < outw; j += 2) { asm volatile( "prfm pldl1keep, [%3, #128] \n" "ld1 {v16.4h, v17.4h}, [%3], #16 \n" // r10 r11 "shll2 v14.4s, %18.8h, #16 \n" "mov v28.16b, %29.16b \n" // sum00 "mov v29.16b, %29.16b \n" // sum01 "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "mov v30.16b, %29.16b \n" // sum10 "mov v31.16b, %29.16b \n" // sum11 "prfm pldl1keep, [%3, #256] \n" "ld1 {v18.4h, v19.4h, v20.4h, v21.4h}, [%3] \n" // r12 r13 r14 r15 "fmla v28.4s, v14.4s, v16.4s \n" "shll v15.4s, %16.4h, #16 \n" "fmla v29.4s, v14.4s, v17.4s \n" "shll v14.4s, %19.4h, #16 \n" "fmla v30.4s, v15.4s, v16.4s \n" "shll v18.4s, v18.4h, #16 \n" "fmla v31.4s, v15.4s, v17.4s \n" "shll2 v15.4s, %16.8h, #16 \n" "fmla v28.4s, v14.4s, v17.4s \n" "fmla v29.4s, v14.4s, v18.4s \n" "shll2 v14.4s, %19.8h, #16 \n" "fmla v30.4s, v15.4s, v17.4s \n" "shll v19.4s, v19.4h, #16 \n" "fmla v31.4s, v15.4s, v18.4s \n" "shll v15.4s, %17.4h, #16 \n" "fmla v28.4s, v14.4s, v18.4s \n" "prfm pldl1keep, [%4, #128] \n" "ld1 {v16.4h, v17.4h}, [%4], #16 \n" // r20 r21 "fmla v29.4s, v14.4s, v19.4s \n" "shll v14.4s, %20.4h, #16 \n" "fmla v30.4s, v15.4s, v18.4s \n" "shll v20.4s, v20.4h, #16 \n" "fmla v31.4s, v15.4s, v19.4s \n" "shll2 v15.4s, %17.8h, #16 \n" "fmla v28.4s, v14.4s, v19.4s \n" "fmla v29.4s, v14.4s, v20.4s \n" "shll2 v14.4s, %20.8h, #16 \n" "fmla v30.4s, v15.4s, v19.4s \n" "shll v21.4s, v21.4h, #16 \n" "fmla v31.4s, v15.4s, v20.4s \n" "shll v15.4s, %18.4h, #16 \n" "fmla v28.4s, v14.4s, v20.4s \n" "shll v16.4s, v16.4h, #16 \n" "fmla v29.4s, v14.4s, v21.4s \n" "shll v14.4s, %21.4h, #16 \n" "fmla v30.4s, v15.4s, v20.4s \n" "shll v17.4s, v17.4h, #16 \n" "fmla v31.4s, v15.4s, v21.4s \n" "prfm pldl1keep, [%4, #256] \n" "ld1 {v18.4h, v19.4h, v20.4h, v21.4h}, [%4] \n" // r22 r23 r24 r25 "fmla v28.4s, v14.4s, v16.4s \n" "shll2 v15.4s, %18.8h, #16 \n" "fmla v29.4s, v14.4s, v17.4s \n" "shll2 v14.4s, %21.8h, #16 \n" "fmla v30.4s, v15.4s, v16.4s \n" "shll v18.4s, v18.4h, #16 \n" "fmla v31.4s, v15.4s, v17.4s \n" "shll v15.4s, %19.4h, #16 \n" "fmla v28.4s, v14.4s, v17.4s \n" "fmla v29.4s, v14.4s, v18.4s \n" "shll v14.4s, %22.4h, #16 \n" "fmla v30.4s, v15.4s, v17.4s \n" "shll v19.4s, v19.4h, #16 \n" "fmla v31.4s, v15.4s, v18.4s \n" "shll2 v15.4s, %19.8h, #16 \n" "fmla v28.4s, v14.4s, v18.4s \n" "prfm pldl1keep, [%5, #128] \n" "ld1 {v16.4h, v17.4h}, [%5], #16 \n" // r30 r31 "fmla v29.4s, v14.4s, v19.4s \n" "shll2 v14.4s, %22.8h, #16 \n" "fmla v30.4s, v15.4s, v18.4s \n" "shll v20.4s, v20.4h, #16 \n" "fmla v31.4s, v15.4s, v19.4s \n" "shll v15.4s, %20.4h, #16 \n" "fmla v28.4s, v14.4s, v19.4s \n" "fmla v29.4s, v14.4s, v20.4s \n" "shll v14.4s, %23.4h, #16 \n" "fmla v30.4s, v15.4s, v19.4s \n" "shll v21.4s, v21.4h, #16 \n" "fmla v31.4s, v15.4s, v20.4s \n" "shll2 v15.4s, %20.8h, #16 \n" "fmla v28.4s, v14.4s, v20.4s \n" "shll v16.4s, v16.4h, #16 \n" "fmla 
v29.4s, v14.4s, v21.4s \n" "shll2 v14.4s, %23.8h, #16 \n" "fmla v30.4s, v15.4s, v20.4s \n" "shll v17.4s, v17.4h, #16 \n" "fmla v31.4s, v15.4s, v21.4s \n" "prfm pldl1keep, [%5, #256] \n" "ld1 {v18.4h, v19.4h, v20.4h, v21.4h}, [%5] \n" // r32 r33 r34 r35 "fmla v28.4s, v14.4s, v16.4s \n" "shll v15.4s, %21.4h, #16 \n" "fmla v29.4s, v14.4s, v17.4s \n" "shll v14.4s, %24.4h, #16 \n" "fmla v30.4s, v15.4s, v16.4s \n" "shll v18.4s, v18.4h, #16 \n" "fmla v31.4s, v15.4s, v17.4s \n" "shll2 v15.4s, %21.8h, #16 \n" "fmla v28.4s, v14.4s, v17.4s \n" "fmla v29.4s, v14.4s, v18.4s \n" "shll2 v14.4s, %24.8h, #16 \n" "fmla v30.4s, v15.4s, v17.4s \n" "shll v19.4s, v19.4h, #16 \n" "fmla v31.4s, v15.4s, v18.4s \n" "shll v15.4s, %22.4h, #16 \n" "fmla v28.4s, v14.4s, v18.4s \n" "prfm pldl1keep, [%6, #128] \n" "ld1 {v16.4h, v17.4h}, [%6], #16 \n" // r40 r41 "fmla v29.4s, v14.4s, v19.4s \n" "shll v14.4s, %25.4h, #16 \n" "fmla v30.4s, v15.4s, v18.4s \n" "shll v20.4s, v20.4h, #16 \n" "fmla v31.4s, v15.4s, v19.4s \n" "shll2 v15.4s, %22.8h, #16 \n" "fmla v28.4s, v14.4s, v19.4s \n" "fmla v29.4s, v14.4s, v20.4s \n" "shll2 v14.4s, %25.8h, #16 \n" "fmla v30.4s, v15.4s, v19.4s \n" "shll v21.4s, v21.4h, #16 \n" "fmla v31.4s, v15.4s, v20.4s \n" "shll v15.4s, %23.4h, #16 \n" "fmla v28.4s, v14.4s, v20.4s \n" "shll v16.4s, v16.4h, #16 \n" "fmla v29.4s, v14.4s, v21.4s \n" "shll v14.4s, %26.4h, #16 \n" "fmla v30.4s, v15.4s, v20.4s \n" "shll v17.4s, v17.4h, #16 \n" "fmla v31.4s, v15.4s, v21.4s \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v18.4h, v19.4h, v20.4h, v21.4h}, [%6] \n" // r42 r43 r44 r45 "fmla v28.4s, v14.4s, v16.4s \n" "shll2 v15.4s, %23.8h, #16 \n" "fmla v29.4s, v14.4s, v17.4s \n" "shll2 v14.4s, %26.8h, #16 \n" "fmla v30.4s, v15.4s, v16.4s \n" "shll v18.4s, v18.4h, #16 \n" "fmla v31.4s, v15.4s, v17.4s \n" "shll v15.4s, %24.4h, #16 \n" "fmla v28.4s, v14.4s, v17.4s \n" "fmla v29.4s, v14.4s, v18.4s \n" "shll v14.4s, %27.4h, #16 \n" "fmla v30.4s, v15.4s, v17.4s \n" "shll v19.4s, v19.4h, #16 \n" "fmla v31.4s, v15.4s, v18.4s \n" "shll2 v15.4s, %24.8h, #16 \n" "fmla v28.4s, v14.4s, v18.4s \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v16.4h, v17.4h}, [%2], #16 \n" // r00 r01 "fmla v29.4s, v14.4s, v19.4s \n" "shll2 v14.4s, %27.8h, #16 \n" "fmla v30.4s, v15.4s, v18.4s \n" "shll v20.4s, v20.4h, #16 \n" "fmla v31.4s, v15.4s, v19.4s \n" "shll v15.4s, %25.4h, #16 \n" "fmla v28.4s, v14.4s, v19.4s \n" "prfm pldl1keep, [%7, #128] \n" "ld1 {v22.4h, v23.4h}, [%7], #16 \n" // r50 r51 "shll v16.4s, v16.4h, #16 \n" "fmla v29.4s, v14.4s, v20.4s \n" "shll v14.4s, %28.4h, #16 \n" "fmla v30.4s, v15.4s, v19.4s \n" "shll v21.4s, v21.4h, #16 \n" "fmla v31.4s, v15.4s, v20.4s \n" "shll2 v15.4s, %25.8h, #16 \n" "fmla v28.4s, v14.4s, v20.4s \n" "shll v17.4s, v17.4h, #16 \n" "fmla v29.4s, v14.4s, v21.4s \n" "shll v14.4s, %16.4h, #16 \n" "fmla v30.4s, v15.4s, v20.4s \n" "shll v22.4s, v22.4h, #16 \n" "fmla v31.4s, v15.4s, v21.4s \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v18.4h, v19.4h, v20.4h, v21.4h}, [%2] \n" // r02 r03 r04 r05 "shll v23.4s, v23.4h, #16 \n" "fmla v28.4s, v14.4s, v16.4s \n" "shll v15.4s, %26.4h, #16 \n" "fmla v29.4s, v14.4s, v17.4s \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7] \n" // r52 r53 r54 r55 "shll2 v14.4s, %16.8h, #16 \n" "fmla v30.4s, v15.4s, v22.4s \n" "shll v18.4s, v18.4h, #16 \n" "fmla v31.4s, v15.4s, v23.4s \n" "shll2 v15.4s, %26.8h, #16 \n" "fmla v28.4s, v14.4s, v17.4s \n" "shll v24.4s, v24.4h, #16 \n" "fmla v29.4s, v14.4s, v18.4s \n" "shll v14.4s, %17.4h, #16 \n" "fmla v30.4s, v15.4s, v23.4s \n" 
"shll v19.4s, v19.4h, #16 \n" "fmla v31.4s, v15.4s, v24.4s \n" "shll v15.4s, %27.4h, #16 \n" "fmla v28.4s, v14.4s, v18.4s \n" "shll v25.4s, v25.4h, #16 \n" "fmla v29.4s, v14.4s, v19.4s \n" "shll2 v14.4s, %17.8h, #16 \n" "fmla v30.4s, v15.4s, v24.4s \n" "shll v20.4s, v20.4h, #16 \n" "fmla v31.4s, v15.4s, v25.4s \n" "shll2 v15.4s, %27.8h, #16 \n" "fmla v28.4s, v14.4s, v19.4s \n" "shll v26.4s, v26.4h, #16 \n" "fmla v29.4s, v14.4s, v20.4s \n" "shll v14.4s, %18.4h, #16 \n" "fmla v30.4s, v15.4s, v25.4s \n" "shll v21.4s, v21.4h, #16 \n" "fmla v31.4s, v15.4s, v26.4s \n" "shll v15.4s, %28.4h, #16 \n" "fmla v28.4s, v14.4s, v20.4s \n" "fmla v29.4s, v14.4s, v21.4s \n" "shll v27.4s, v27.4h, #16 \n" "fmla v30.4s, v15.4s, v26.4s \n" "fmla v31.4s, v15.4s, v27.4s \n" "shrn v28.4h, v28.4s, #16 \n" "shrn v29.4h, v29.4s, #16 \n" "shrn v30.4h, v30.4s, #16 \n" "shrn v31.4h, v31.4s, #16 \n" "st1 {v28.4h, v29.4h}, [%0], #16 \n" "st1 {v30.4h, v31.4h}, [%1], #16 \n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(r3), // %5 "=r"(r4), // %6 "=r"(r5) // %7 : "0"(outptr0), "1"(outptr1), "2"(r0), "3"(r1), "4"(r2), "5"(r3), "6"(r4), "7"(r5), "w"(_k00_01), // %16 "w"(_k02_03), // %17 "w"(_k04_10), // %18 "w"(_k11_12), // %19 "w"(_k13_14), // %20 "w"(_k20_21), // %21 "w"(_k22_23), // %22 "w"(_k24_30), // %23 "w"(_k31_32), // %24 "w"(_k33_34), // %25 "w"(_k40_41), // %26 "w"(_k42_43), // %27 "w"(_k44), // %28 "w"(_bias0) // %29 : "memory", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; j < outw; j++) { asm volatile( "prfm pldl1keep, [%3, #64] \n" "ld1 {v16.4h}, [%3], #8 \n" // r10 "prfm pldl1keep, [%3, #256] \n" "ld1 {v17.4h, v18.4h, v19.4h, v20.4h}, [%3] \n" // r11 r12 r13 r14 "mov v30.16b, %29.16b \n" // sum00 "mov v31.16b, %29.16b \n" // sum10 "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "shll v18.4s, v18.4h, #16 \n" "shll v19.4s, v19.4h, #16 \n" "shll v20.4s, v20.4h, #16 \n" "shll2 v14.4s, %18.8h, #16 \n" "shll v15.4s, %16.4h, #16 \n" "fmul v28.4s, v14.4s, v16.4s \n" "shll v14.4s, %19.4h, #16 \n" "fmul v29.4s, v15.4s, v16.4s \n" "shll2 v15.4s, %16.8h, #16 \n" "fmla v30.4s, v14.4s, v17.4s \n" "shll2 v14.4s, %19.8h, #16 \n" "fmla v31.4s, v15.4s, v17.4s \n" "shll v15.4s, %17.4h, #16 \n" "fmla v28.4s, v14.4s, v18.4s \n" "shll v14.4s, %20.4h, #16 \n" "fmla v29.4s, v15.4s, v18.4s \n" "shll2 v15.4s, %17.8h, #16 \n" "fmla v30.4s, v14.4s, v19.4s \n" "shll2 v14.4s, %20.8h, #16 \n" "fmla v31.4s, v15.4s, v19.4s \n" "shll v15.4s, %18.4h, #16 \n" "fmla v28.4s, v14.4s, v20.4s \n" "shll v14.4s, %21.4h, #16 \n" "fmla v29.4s, v15.4s, v20.4s \n" "prfm pldl1keep, [%4, #64] \n" "ld1 {v16.4h}, [%4], #8 \n" // r20 "prfm pldl1keep, [%4, #256] \n" "ld1 {v17.4h, v18.4h, v19.4h, v20.4h}, [%4] \n" // r21 r22 r23 r24 "shll2 v15.4s, %18.8h, #16 \n" "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "shll v18.4s, v18.4h, #16 \n" "shll v19.4s, v19.4h, #16 \n" "shll v20.4s, v20.4h, #16 \n" "fmla v30.4s, v14.4s, v16.4s \n" "shll2 v14.4s, %21.8h, #16 \n" "fmla v31.4s, v15.4s, v16.4s \n" "shll v15.4s, %19.4h, #16 \n" "fmla v28.4s, v14.4s, v17.4s \n" "shll v14.4s, %22.4h, #16 \n" "fmla v29.4s, v15.4s, v17.4s \n" "shll2 v15.4s, %19.8h, #16 \n" "fmla v30.4s, v14.4s, v18.4s \n" "shll2 v14.4s, %22.8h, #16 \n" "fmla v31.4s, v15.4s, v18.4s \n" "shll v15.4s, %20.4h, #16 \n" "fmla v28.4s, v14.4s, v19.4s \n" "shll v14.4s, %23.4h, #16 \n" "fmla v29.4s, v15.4s, v19.4s \n" "shll2 v15.4s, %20.8h, #16 \n" 
"fmla v30.4s, v14.4s, v20.4s \n" "shll2 v14.4s, %23.8h, #16 \n" "fmla v31.4s, v15.4s, v20.4s \n" "prfm pldl1keep, [%5, #64] \n" "ld1 {v16.4h}, [%5], #8 \n" // r30 "prfm pldl1keep, [%5, #256] \n" "ld1 {v17.4h, v18.4h, v19.4h, v20.4h}, [%5] \n" // r31 r32 r33 r34 "shll v15.4s, %21.4h, #16 \n" "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "shll v18.4s, v18.4h, #16 \n" "shll v19.4s, v19.4h, #16 \n" "shll v20.4s, v20.4h, #16 \n" "fmla v28.4s, v14.4s, v16.4s \n" "shll v14.4s, %24.4h, #16 \n" "fmla v29.4s, v15.4s, v16.4s \n" "shll2 v15.4s, %21.8h, #16 \n" "fmla v30.4s, v14.4s, v17.4s \n" "shll2 v14.4s, %24.8h, #16 \n" "fmla v31.4s, v15.4s, v17.4s \n" "shll v15.4s, %22.4h, #16 \n" "fmla v28.4s, v14.4s, v18.4s \n" "shll v14.4s, %25.4h, #16 \n" "fmla v29.4s, v15.4s, v18.4s \n" "shll2 v15.4s, %22.8h, #16 \n" "fmla v30.4s, v14.4s, v19.4s \n" "shll2 v14.4s, %25.8h, #16 \n" "fmla v31.4s, v15.4s, v19.4s \n" "shll v15.4s, %23.4h, #16 \n" "fmla v28.4s, v14.4s, v20.4s \n" "shll v14.4s, %26.4h, #16 \n" "fmla v29.4s, v15.4s, v20.4s \n" "prfm pldl1keep, [%6, #64] \n" "ld1 {v16.4h}, [%6], #8 \n" // r40 "prfm pldl1keep, [%6, #256] \n" "ld1 {v17.4h, v18.4h, v19.4h, v20.4h}, [%6] \n" // r41 r42 r43 r44 "shll2 v15.4s, %23.8h, #16 \n" "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "shll v18.4s, v18.4h, #16 \n" "shll v19.4s, v19.4h, #16 \n" "shll v20.4s, v20.4h, #16 \n" "fmla v30.4s, v14.4s, v16.4s \n" "shll2 v14.4s, %26.8h, #16 \n" "fmla v31.4s, v15.4s, v16.4s \n" "shll v15.4s, %24.4h, #16 \n" "fmla v28.4s, v14.4s, v17.4s \n" "shll v14.4s, %27.4h, #16 \n" "fmla v29.4s, v15.4s, v17.4s \n" "shll2 v15.4s, %24.8h, #16 \n" "fmla v30.4s, v14.4s, v18.4s \n" "shll2 v14.4s, %27.8h, #16 \n" "fmla v31.4s, v15.4s, v18.4s \n" "shll v15.4s, %25.4h, #16 \n" "fmla v28.4s, v14.4s, v19.4s \n" "shll v14.4s, %28.4h, #16 \n" "fmla v29.4s, v15.4s, v19.4s \n" "shll2 v15.4s, %25.8h, #16 \n" "fmla v30.4s, v14.4s, v20.4s \n" "shll v14.4s, %16.4h, #16 \n" "fmla v31.4s, v15.4s, v20.4s \n" "prfm pldl1keep, [%2, #64] \n" "ld1 {v16.4h}, [%2], #8 \n" // r00 "prfm pldl1keep, [%7, #64] \n" "ld1 {v21.4h}, [%7], #8 \n" // r50 "prfm pldl1keep, [%7, #256] \n" "ld1 {v22.4h, v23.4h, v24.4h, v25.4h}, [%7] \n" // r51 r52 r53 r54 "prfm pldl1keep, [%2, #256] \n" "ld1 {v17.4h, v18.4h, v19.4h, v20.4h}, [%2] \n" // r01 r02 r03 r04 "shll v15.4s, %26.4h, #16 \n" "shll v16.4s, v16.4h, #16 \n" "shll v21.4s, v21.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "shll v18.4s, v18.4h, #16 \n" "shll v19.4s, v19.4h, #16 \n" "shll v20.4s, v20.4h, #16 \n" "shll v22.4s, v22.4h, #16 \n" "shll v23.4s, v23.4h, #16 \n" "shll v24.4s, v24.4h, #16 \n" "shll v25.4s, v25.4h, #16 \n" "fmla v28.4s, v14.4s, v16.4s \n" "shll2 v14.4s, %16.8h, #16 \n" "fmla v29.4s, v15.4s, v21.4s \n" "shll2 v15.4s, %26.8h, #16 \n" "fmla v30.4s, v14.4s, v17.4s \n" "shll v14.4s, %17.4h, #16 \n" "fmla v31.4s, v15.4s, v22.4s \n" "shll v15.4s, %27.4h, #16 \n" "fmla v28.4s, v14.4s, v18.4s \n" "shll2 v14.4s, %17.8h, #16 \n" "fmla v29.4s, v15.4s, v23.4s \n" "shll2 v15.4s, %27.8h, #16 \n" "fmla v30.4s, v14.4s, v19.4s \n" "shll v14.4s, %18.4h, #16 \n" "fmla v31.4s, v15.4s, v24.4s \n" "shll v15.4s, %28.4h, #16 \n" "fmla v28.4s, v14.4s, v20.4s \n" "fmla v29.4s, v15.4s, v25.4s \n" "fadd v30.4s, v30.4s, v28.4s \n" "fadd v31.4s, v31.4s, v29.4s \n" "shrn v30.4h, v30.4s, #16 \n" "shrn v31.4h, v31.4s, #16 \n" "st1 {v30.4h}, [%0], #8 \n" "st1 {v31.4h}, [%1], #8 \n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(r3), // %5 "=r"(r4), // %6 "=r"(r5) // %7 : 
"0"(outptr0), "1"(outptr1), "2"(r0), "3"(r1), "4"(r2), "5"(r3), "6"(r4), "7"(r5), "w"(_k00_01), // %16 "w"(_k02_03), // %17 "w"(_k04_10), // %18 "w"(_k11_12), // %19 "w"(_k13_14), // %20 "w"(_k20_21), // %21 "w"(_k22_23), // %22 "w"(_k24_30), // %23 "w"(_k31_32), // %24 "w"(_k33_34), // %25 "w"(_k40_41), // %26 "w"(_k42_43), // %27 "w"(_k44), // %28 "w"(_bias0) // %29 : "memory", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } r0 += 4 * 4 + w * 4; r1 += 4 * 4 + w * 4; r2 += 4 * 4 + w * 4; r3 += 4 * 4 + w * 4; r4 += 4 * 4 + w * 4; r5 += 4 * 4 + w * 4; outptr0 += outw * 4; outptr1 += outw * 4; } #endif // __aarch64__ for (; i < outh; i++) { int j = 0; for (; j + 3 < outw; j += 4) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%1, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%1], #32 \n" // r00 r01 r02 r03 "shll v14.4s, %12.4h, #16 \n" "mov v28.16b, %25.16b \n" // sum00 "mov v29.16b, %25.16b \n" // sum01 "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "mov v30.16b, %25.16b \n" // sum02 "mov v31.16b, %25.16b \n" // sum03 "shll2 v15.4s, %12.8h, #16 \n" "fmla v28.4s, v14.4s, v16.4s \n" "shll v18.4s, v18.4h, #16 \n" "fmla v29.4s, v14.4s, v17.4s \n" "shll v19.4s, v19.4h, #16 \n" "fmla v30.4s, v14.4s, v18.4s \n" "prfm pldl1keep, [%1, #256] \n" "ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%1] \n" // r04 r05 r06 r07 "fmla v31.4s, v14.4s, v19.4s \n" "shll v14.4s, %13.4h, #16 \n" "fmla v28.4s, v15.4s, v17.4s \n" "fmla v29.4s, v15.4s, v18.4s \n" "shll v20.4s, v20.4h, #16 \n" "fmla v30.4s, v15.4s, v19.4s \n" "fmla v31.4s, v15.4s, v20.4s \n" "shll2 v15.4s, %13.8h, #16 \n" "fmla v28.4s, v14.4s, v18.4s \n" "fmla v29.4s, v14.4s, v19.4s \n" "shll v21.4s, v21.4h, #16 \n" "fmla v30.4s, v14.4s, v20.4s \n" "fmla v31.4s, v14.4s, v21.4s \n" "shll v14.4s, %14.4h, #16 \n" "fmla v28.4s, v15.4s, v19.4s \n" "fmla v29.4s, v15.4s, v20.4s \n" "shll v22.4s, v22.4h, #16 \n" "fmla v30.4s, v15.4s, v21.4s \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%2], #32 \n" // r10 r11 r12 r13 "fmla v31.4s, v15.4s, v22.4s \n" "shll2 v15.4s, %14.8h, #16 \n" "fmla v28.4s, v14.4s, v20.4s \n" "shll v16.4s, v16.4h, #16 \n" "fmla v29.4s, v14.4s, v21.4s \n" "shll v23.4s, v23.4h, #16 \n" "fmla v30.4s, v14.4s, v22.4s \n" "shll v17.4s, v17.4h, #16 \n" "fmla v31.4s, v14.4s, v23.4s \n" "shll v14.4s, %15.4h, #16 \n" "fmla v28.4s, v15.4s, v16.4s \n" "shll v18.4s, v18.4h, #16 \n" "fmla v29.4s, v15.4s, v17.4s \n" "shll v19.4s, v19.4h, #16 \n" "fmla v30.4s, v15.4s, v18.4s \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%2] \n" // r14 r15 r16 r17 "fmla v31.4s, v15.4s, v19.4s \n" "shll2 v15.4s, %15.8h, #16 \n" "fmla v28.4s, v14.4s, v17.4s \n" "fmla v29.4s, v14.4s, v18.4s \n" "shll v20.4s, v20.4h, #16 \n" "fmla v30.4s, v14.4s, v19.4s \n" "fmla v31.4s, v14.4s, v20.4s \n" "shll v14.4s, %16.4h, #16 \n" "fmla v28.4s, v15.4s, v18.4s \n" "fmla v29.4s, v15.4s, v19.4s \n" "shll v21.4s, v21.4h, #16 \n" "fmla v30.4s, v15.4s, v20.4s \n" "fmla v31.4s, v15.4s, v21.4s \n" "shll2 v15.4s, %16.8h, #16 \n" "fmla v28.4s, v14.4s, v19.4s \n" "fmla v29.4s, v14.4s, v20.4s \n" "shll v22.4s, v22.4h, #16 \n" "fmla v30.4s, v14.4s, v21.4s \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%3], #32 \n" // r20 r21 r22 r23 "fmla v31.4s, v14.4s, v22.4s \n" "shll v14.4s, %17.4h, #16 \n" "fmla v28.4s, v15.4s, v20.4s \n" "shll v16.4s, v16.4h, #16 \n" "fmla v29.4s, v15.4s, v21.4s \n" "shll v23.4s, v23.4h, #16 \n" "fmla 
v30.4s, v15.4s, v22.4s \n" "shll v17.4s, v17.4h, #16 \n" "fmla v31.4s, v15.4s, v23.4s \n" "shll2 v15.4s, %17.8h, #16 \n" "fmla v28.4s, v14.4s, v16.4s \n" "shll v18.4s, v18.4h, #16 \n" "fmla v29.4s, v14.4s, v17.4s \n" "shll v19.4s, v19.4h, #16 \n" "fmla v30.4s, v14.4s, v18.4s \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%3] \n" // r24 r25 r26 r27 "fmla v31.4s, v14.4s, v19.4s \n" "shll v14.4s, %18.4h, #16 \n" "fmla v28.4s, v15.4s, v17.4s \n" "fmla v29.4s, v15.4s, v18.4s \n" "shll v20.4s, v20.4h, #16 \n" "fmla v30.4s, v15.4s, v19.4s \n" "fmla v31.4s, v15.4s, v20.4s \n" "shll2 v15.4s, %18.8h, #16 \n" "fmla v28.4s, v14.4s, v18.4s \n" "fmla v29.4s, v14.4s, v19.4s \n" "shll v21.4s, v21.4h, #16 \n" "fmla v30.4s, v14.4s, v20.4s \n" "fmla v31.4s, v14.4s, v21.4s \n" "shll v14.4s, %19.4h, #16 \n" "fmla v28.4s, v15.4s, v19.4s \n" "fmla v29.4s, v15.4s, v20.4s \n" "shll v22.4s, v22.4h, #16 \n" "fmla v30.4s, v15.4s, v21.4s \n" "prfm pldl1keep, [%4, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%4], #32 \n" // r30 r31 r32 r33 "fmla v31.4s, v15.4s, v22.4s \n" "shll2 v15.4s, %19.8h, #16 \n" "fmla v28.4s, v14.4s, v20.4s \n" "shll v16.4s, v16.4h, #16 \n" "fmla v29.4s, v14.4s, v21.4s \n" "shll v23.4s, v23.4h, #16 \n" "fmla v30.4s, v14.4s, v22.4s \n" "shll v17.4s, v17.4h, #16 \n" "fmla v31.4s, v14.4s, v23.4s \n" "shll v14.4s, %20.4h, #16 \n" "fmla v28.4s, v15.4s, v16.4s \n" "shll v18.4s, v18.4h, #16 \n" "fmla v29.4s, v15.4s, v17.4s \n" "shll v19.4s, v19.4h, #16 \n" "fmla v30.4s, v15.4s, v18.4s \n" "prfm pldl1keep, [%4, #256] \n" "ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%4] \n" // r34 r35 r36 r37 "fmla v31.4s, v15.4s, v19.4s \n" "shll2 v15.4s, %20.8h, #16 \n" "fmla v28.4s, v14.4s, v17.4s \n" "fmla v29.4s, v14.4s, v18.4s \n" "shll v20.4s, v20.4h, #16 \n" "fmla v30.4s, v14.4s, v19.4s \n" "fmla v31.4s, v14.4s, v20.4s \n" "shll v14.4s, %21.4h, #16 \n" "fmla v28.4s, v15.4s, v18.4s \n" "fmla v29.4s, v15.4s, v19.4s \n" "shll v21.4s, v21.4h, #16 \n" "fmla v30.4s, v15.4s, v20.4s \n" "fmla v31.4s, v15.4s, v21.4s \n" "shll2 v15.4s, %21.8h, #16 \n" "fmla v28.4s, v14.4s, v19.4s \n" "fmla v29.4s, v14.4s, v20.4s \n" "shll v22.4s, v22.4h, #16 \n" "fmla v30.4s, v14.4s, v21.4s \n" "prfm pldl1keep, [%5, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%5], #32 \n" // r40 r41 r42 r43 "fmla v31.4s, v14.4s, v22.4s \n" "shll v14.4s, %22.4h, #16 \n" "fmla v28.4s, v15.4s, v20.4s \n" "shll v16.4s, v16.4h, #16 \n" "fmla v29.4s, v15.4s, v21.4s \n" "shll v23.4s, v23.4h, #16 \n" "fmla v30.4s, v15.4s, v22.4s \n" "shll v17.4s, v17.4h, #16 \n" "fmla v31.4s, v15.4s, v23.4s \n" "shll2 v15.4s, %22.8h, #16 \n" "fmla v28.4s, v14.4s, v16.4s \n" "shll v18.4s, v18.4h, #16 \n" "fmla v29.4s, v14.4s, v17.4s \n" "shll v19.4s, v19.4h, #16 \n" "fmla v30.4s, v14.4s, v18.4s \n" "prfm pldl1keep, [%5, #256] \n" "ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%5] \n" // r44 r45 r46 r47 "fmla v31.4s, v14.4s, v19.4s \n" "shll v14.4s, %23.4h, #16 \n" "fmla v28.4s, v15.4s, v17.4s \n" "fmla v29.4s, v15.4s, v18.4s \n" "shll v20.4s, v20.4h, #16 \n" "fmla v30.4s, v15.4s, v19.4s \n" "fmla v31.4s, v15.4s, v20.4s \n" "shll2 v15.4s, %23.8h, #16 \n" "fmla v28.4s, v14.4s, v18.4s \n" "fmla v29.4s, v14.4s, v19.4s \n" "shll v21.4s, v21.4h, #16 \n" "fmla v30.4s, v14.4s, v20.4s \n" "fmla v31.4s, v14.4s, v21.4s \n" "shll v14.4s, %24.4h, #16 \n" "fmla v28.4s, v15.4s, v19.4s \n" "fmla v29.4s, v15.4s, v20.4s \n" "shll v22.4s, v22.4h, #16 \n" "fmla v30.4s, v15.4s, v21.4s \n" "fmla v31.4s, v15.4s, v22.4s \n" "fmla v28.4s, v14.4s, v20.4s \n" "fmla v29.4s, v14.4s, 
v21.4s \n" "shll v23.4s, v23.4h, #16 \n" "fmla v30.4s, v14.4s, v22.4s \n" "fmla v31.4s, v14.4s, v23.4s \n" "shrn v28.4h, v28.4s, #16 \n" "shrn v29.4h, v29.4s, #16 \n" "shrn v30.4h, v30.4s, #16 \n" "shrn v31.4h, v31.4s, #16 \n" "st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%0], #32 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2), // %3 "=r"(r3), // %4 "=r"(r4) // %5 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "4"(r3), "5"(r4), "w"(_k00_01), // %12 "w"(_k02_03), // %13 "w"(_k04_10), // %14 "w"(_k11_12), // %15 "w"(_k13_14), // %16 "w"(_k20_21), // %17 "w"(_k22_23), // %18 "w"(_k24_30), // %19 "w"(_k31_32), // %20 "w"(_k33_34), // %21 "w"(_k40_41), // %22 "w"(_k42_43), // %23 "w"(_k44), // %24 "w"(_bias0) // %25 : "memory", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); #else // __aarch64__ asm volatile( "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :64]! \n" "pld [%2, #256] \n" "vld1.u16 {d4-d7}, [%2 :64]! \n" // r00 r01 r02 r03 "vshll.u16 q8, d20, #16 \n" // k00 "pld [%1, #128] \n" "vld1.f32 {d24-d25}, [%1] \n" "vshll.u16 q0, d4, #16 \n" "vshll.u16 q1, d5, #16 \n" "vmov q13, q12 \n" // sum0 sum1 "vmov q14, q12 \n" "vshll.u16 q9, d21, #16 \n" // k01 "vmov q15, q12 \n" // sum2 sum3 "vmla.f32 q12, q8, q0 \n" "vshll.u16 q2, d6, #16 \n" "vmla.f32 q13, q8, q1 \n" "vshll.u16 q3, d7, #16 \n" "vmla.f32 q14, q8, q2 \n" "pld [%2, #256] \n" "vld1.u16 {d12-d15}, [%2 :64] \n" // r04 r05 r06 r07 "vmla.f32 q15, q8, q3 \n" "vshll.u16 q10, d22, #16 \n" // k02 "vmla.f32 q12, q9, q1 \n" "vmla.f32 q13, q9, q2 \n" "vshll.u16 q4, d12, #16 \n" "vmla.f32 q14, q9, q3 \n" "vmla.f32 q15, q9, q4 \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :64]! \n" "vshll.u16 q11, d23, #16 \n" // k03 "vmla.f32 q12, q10, q2 \n" "vmla.f32 q13, q10, q3 \n" "vshll.u16 q5, d13, #16 \n" "vmla.f32 q14, q10, q4 \n" "vmla.f32 q15, q10, q5 \n" "vshll.u16 q10, d16, #16 \n" // k04 "vmla.f32 q12, q11, q3 \n" "vmla.f32 q13, q11, q4 \n" "vshll.u16 q6, d14, #16 \n" "vmla.f32 q14, q11, q5 \n" "pld [%3, #256] \n" "vld1.u16 {d4-d7}, [%3 :64]! \n" // r10 r11 r12 r13 "vmla.f32 q15, q11, q6 \n" "vshll.u16 q11, d17, #16 \n" // k10 "vmla.f32 q12, q10, q4 \n" "vshll.u16 q0, d4, #16 \n" "vmla.f32 q13, q10, q5 \n" "vshll.u16 q7, d15, #16 \n" "vmla.f32 q14, q10, q6 \n" "vshll.u16 q1, d5, #16 \n" "vmla.f32 q15, q10, q7 \n" "vshll.u16 q8, d18, #16 \n" // k11 "vmla.f32 q12, q11, q0 \n" "vshll.u16 q2, d6, #16 \n" "vmla.f32 q13, q11, q1 \n" "vshll.u16 q3, d7, #16 \n" "vmla.f32 q14, q11, q2 \n" "pld [%3, #256] \n" "vld1.u16 {d12-d15}, [%3 :64] \n" // r14 r15 r16 r17 "vmla.f32 q15, q11, q3 \n" "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :64]! \n" "vshll.u16 q9, d19, #16 \n" // k12 "vmla.f32 q12, q8, q1 \n" "vmla.f32 q13, q8, q2 \n" "vshll.u16 q4, d12, #16 \n" "vmla.f32 q14, q8, q3 \n" "vmla.f32 q15, q8, q4 \n" "vshll.u16 q8, d20, #16 \n" // k13 "vmla.f32 q12, q9, q2 \n" "vmla.f32 q13, q9, q3 \n" "vshll.u16 q5, d13, #16 \n" "vmla.f32 q14, q9, q4 \n" "vmla.f32 q15, q9, q5 \n" "vshll.u16 q9, d21, #16 \n" // k14 "vmla.f32 q12, q8, q3 \n" "vmla.f32 q13, q8, q4 \n" "vshll.u16 q6, d14, #16 \n" "vmla.f32 q14, q8, q5 \n" "pld [%4, #256] \n" "vld1.u16 {d4-d7}, [%4 :64]! 
\n" // r20 r21 r22 r23 "vmla.f32 q15, q8, q6 \n" "vshll.u16 q10, d22, #16 \n" // k20 "vmla.f32 q12, q9, q4 \n" "vshll.u16 q0, d4, #16 \n" "vmla.f32 q13, q9, q5 \n" "vshll.u16 q7, d15, #16 \n" "vmla.f32 q14, q9, q6 \n" "vshll.u16 q1, d5, #16 \n" "vmla.f32 q15, q9, q7 \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :64]! \n" "vshll.u16 q11, d23, #16 \n" // k21 "vmla.f32 q12, q10, q0 \n" "vshll.u16 q2, d6, #16 \n" "vmla.f32 q13, q10, q1 \n" "vshll.u16 q3, d7, #16 \n" "vmla.f32 q14, q10, q2 \n" "pld [%4, #256] \n" "vld1.u16 {d12-d15}, [%4 :64] \n" // r24 r25 r26 r27 "vmla.f32 q15, q10, q3 \n" "vshll.u16 q10, d16, #16 \n" // k22 "vmla.f32 q12, q11, q1 \n" "vmla.f32 q13, q11, q2 \n" "vshll.u16 q4, d12, #16 \n" "vmla.f32 q14, q11, q3 \n" "vmla.f32 q15, q11, q4 \n" "vshll.u16 q11, d17, #16 \n" // k23 "vmla.f32 q12, q10, q2 \n" "vmla.f32 q13, q10, q3 \n" "vshll.u16 q5, d13, #16 \n" "vmla.f32 q14, q10, q4 \n" "vmla.f32 q15, q10, q5 \n" "vshll.u16 q8, d18, #16 \n" // k24 "vmla.f32 q12, q11, q3 \n" "vmla.f32 q13, q11, q4 \n" "vshll.u16 q6, d14, #16 \n" "vmla.f32 q14, q11, q5 \n" "pld [%5, #256] \n" "vld1.u16 {d4-d7}, [%5 :64]! \n" // r30 r31 r32 r33 "vmla.f32 q15, q11, q6 \n" "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :64]! \n" "vshll.u16 q9, d19, #16 \n" // k30 "vmla.f32 q12, q8, q4 \n" "vshll.u16 q0, d4, #16 \n" "vmla.f32 q13, q8, q5 \n" "vshll.u16 q7, d15, #16 \n" "vmla.f32 q14, q8, q6 \n" "vshll.u16 q1, d5, #16 \n" "vmla.f32 q15, q8, q7 \n" "vshll.u16 q8, d20, #16 \n" // k31 "vmla.f32 q12, q9, q0 \n" "vshll.u16 q2, d6, #16 \n" "vmla.f32 q13, q9, q1 \n" "vshll.u16 q3, d7, #16 \n" "vmla.f32 q14, q9, q2 \n" "pld [%5, #256] \n" "vld1.u16 {d12-d15}, [%5 :64] \n" // r34 r35 r36 r37 "vmla.f32 q15, q9, q3 \n" "vshll.u16 q9, d21, #16 \n" // k32 "vmla.f32 q12, q8, q1 \n" "vmla.f32 q13, q8, q2 \n" "vshll.u16 q4, d12, #16 \n" "vmla.f32 q14, q8, q3 \n" "vmla.f32 q15, q8, q4 \n" "vshll.u16 q10, d22, #16 \n" // k33 "vmla.f32 q12, q9, q2 \n" "vmla.f32 q13, q9, q3 \n" "vshll.u16 q5, d13, #16 \n" "vmla.f32 q14, q9, q4 \n" "vmla.f32 q15, q9, q5 \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :64]! \n" "vmla.f32 q12, q10, q3 \n" "vshll.u16 q11, d23, #16 \n" // k34 "vmla.f32 q13, q10, q4 \n" "vshll.u16 q6, d14, #16 \n" "vmla.f32 q14, q10, q5 \n" "pld [%6, #256] \n" "vld1.u16 {d4-d7}, [%6 :64]! 
\n" // r40 r41 r42 r43 "vmla.f32 q15, q10, q6 \n" "vshll.u16 q10, d16, #16 \n" // k40 "vmla.f32 q12, q11, q4 \n" "vshll.u16 q0, d4, #16 \n" "vmla.f32 q13, q11, q5 \n" "vshll.u16 q7, d15, #16 \n" "vmla.f32 q14, q11, q6 \n" "vshll.u16 q1, d5, #16 \n" "vmla.f32 q15, q11, q7 \n" "vshll.u16 q11, d17, #16 \n" // k41 "vmla.f32 q12, q10, q0 \n" "vshll.u16 q2, d6, #16 \n" "vmla.f32 q13, q10, q1 \n" "vshll.u16 q3, d7, #16 \n" "vmla.f32 q14, q10, q2 \n" "pld [%6, #256] \n" "vld1.u16 {d12-d15}, [%6 :64] \n" // r44 r45 r46 r47 "vmla.f32 q15, q10, q3 \n" "vshll.u16 q8, d18, #16 \n" // k42 "vmla.f32 q12, q11, q1 \n" "vmla.f32 q13, q11, q2 \n" "vshll.u16 q4, d12, #16 \n" "vmla.f32 q14, q11, q3 \n" "vmla.f32 q15, q11, q4 \n" "pld [%7, #64] \n" "vld1.u16 {d20}, [%7 :64] \n" "vmla.f32 q12, q8, q2 \n" "vshll.u16 q9, d19, #16 \n" // k43 "vmla.f32 q13, q8, q3 \n" "vshll.u16 q5, d13, #16 \n" "vmla.f32 q14, q8, q4 \n" "vmla.f32 q15, q8, q5 \n" "vshll.u16 q8, d20, #16 \n" // k44 "vmla.f32 q12, q9, q3 \n" "vmla.f32 q13, q9, q4 \n" "vshll.u16 q6, d14, #16 \n" "vmla.f32 q14, q9, q5 \n" "vmla.f32 q15, q9, q6 \n" "vmla.f32 q12, q8, q4 \n" "vmla.f32 q13, q8, q5 \n" "vshll.u16 q7, d15, #16 \n" "vmla.f32 q14, q8, q6 \n" "vmla.f32 q15, q8, q7 \n" "sub %7, %7, #192 \n" // kptr -= 24 * 4; "vshrn.u32 d24, q12, #16 \n" "vshrn.u32 d25, q13, #16 \n" "vshrn.u32 d26, q14, #16 \n" "vshrn.u32 d27, q15, #16 \n" "vst1.u16 {d24-d27}, [%0 :64]! \n" : "=r"(outptr0), // %0 "=r"(bias0_data_ptr), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(r3), // %5 "=r"(r4), // %6 "=r"(kptr) // %7 : "0"(outptr0), "1"(bias0_data_ptr), "2"(r0), "3"(r1), "4"(r2), "5"(r3), "6"(r4), "7"(kptr) : "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); #endif // __aarch64__ } for (; j + 1 < outw; j += 2) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%1, #128] \n" "ld1 {v16.4h, v17.4h}, [%1], #16 \n" // r00 r01 "prfm pldl1keep, [%1, #256] \n" "ld1 {v18.4h, v19.4h, v20.4h, v21.4h}, [%1] \n" // r02 r03 r04 r05 "shll v14.4s, %12.4h, #16 \n" "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "mov v30.16b, %25.16b \n" // sum01 "mov v31.16b, %25.16b \n" // sum02 "shll2 v15.4s, %12.8h, #16 \n" "fmul v28.4s, v14.4s, v16.4s \n" "shll v18.4s, v18.4h, #16 \n" "fmul v29.4s, v14.4s, v17.4s \n" "shll v14.4s, %13.4h, #16 \n" "fmla v30.4s, v15.4s, v17.4s \n" "shll v19.4s, v19.4h, #16 \n" "fmla v31.4s, v15.4s, v18.4s \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v16.4h, v17.4h}, [%2], #16 \n" // r10 r11 "shll2 v15.4s, %13.8h, #16 \n" "fmla v28.4s, v14.4s, v18.4s \n" "shll v20.4s, v20.4h, #16 \n" "fmla v29.4s, v14.4s, v19.4s \n" "shll v14.4s, %14.4h, #16 \n" "fmla v30.4s, v15.4s, v19.4s \n" "shll v21.4s, v21.4h, #16 \n" "fmla v31.4s, v15.4s, v20.4s \n" "shll2 v15.4s, %14.8h, #16 \n" "fmla v28.4s, v14.4s, v20.4s \n" "shll v16.4s, v16.4h, #16 \n" "fmla v29.4s, v14.4s, v21.4s \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v18.4h, v19.4h, v20.4h, v21.4h}, [%2] \n" // r12 r13 r14 r15 "shll v14.4s, %15.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "fmla v30.4s, v15.4s, v16.4s \n" "shll v18.4s, v18.4h, #16 \n" "fmla v31.4s, v15.4s, v17.4s \n" "shll2 v15.4s, %15.8h, #16 \n" "fmla v28.4s, v14.4s, v17.4s \n" "shll v19.4s, v19.4h, #16 \n" "fmla v29.4s, v14.4s, v18.4s \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v16.4h, v17.4h}, [%3], #16 \n" // r20 r21 "shll v14.4s, %16.4h, #16 \n" "fmla v30.4s, v15.4s, v18.4s \n" "shll v20.4s, v20.4h, #16 \n" "fmla v31.4s, v15.4s, v19.4s \n" "shll2 v15.4s, %16.8h, #16 \n" "fmla v28.4s, 
v14.4s, v19.4s \n" "shll v21.4s, v21.4h, #16 \n" "fmla v29.4s, v14.4s, v20.4s \n" "shll v14.4s, %17.4h, #16 \n" "fmla v30.4s, v15.4s, v20.4s \n" "shll v16.4s, v16.4h, #16 \n" "fmla v31.4s, v15.4s, v21.4s \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v18.4h, v19.4h, v20.4h, v21.4h}, [%3] \n" // r22 r23 r24 r25 "shll2 v15.4s, %17.8h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "fmla v28.4s, v14.4s, v16.4s \n" "shll v18.4s, v18.4h, #16 \n" "fmla v29.4s, v14.4s, v17.4s \n" "shll v14.4s, %18.4h, #16 \n" "fmla v30.4s, v15.4s, v17.4s \n" "shll v19.4s, v19.4h, #16 \n" "fmla v31.4s, v15.4s, v18.4s \n" "prfm pldl1keep, [%4, #128] \n" "ld1 {v16.4h, v17.4h}, [%4], #16 \n" // r30 r31 "shll2 v15.4s, %18.8h, #16 \n" "fmla v28.4s, v14.4s, v18.4s \n" "shll v20.4s, v20.4h, #16 \n" "fmla v29.4s, v14.4s, v19.4s \n" "shll v14.4s, %19.4h, #16 \n" "fmla v30.4s, v15.4s, v19.4s \n" "shll v21.4s, v21.4h, #16 \n" "fmla v31.4s, v15.4s, v20.4s \n" "shll2 v15.4s, %19.8h, #16 \n" "fmla v28.4s, v14.4s, v20.4s \n" "shll v16.4s, v16.4h, #16 \n" "fmla v29.4s, v14.4s, v21.4s \n" "prfm pldl1keep, [%4, #256] \n" "ld1 {v18.4h, v19.4h, v20.4h, v21.4h}, [%4] \n" // r32 r33 r34 r35 "shll v14.4s, %20.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "fmla v30.4s, v15.4s, v16.4s \n" "shll v18.4s, v18.4h, #16 \n" "fmla v31.4s, v15.4s, v17.4s \n" "shll2 v15.4s, %20.8h, #16 \n" "fmla v28.4s, v14.4s, v17.4s \n" "shll v19.4s, v19.4h, #16 \n" "fmla v29.4s, v14.4s, v18.4s \n" "prfm pldl1keep, [%5, #128] \n" "ld1 {v16.4h, v17.4h}, [%5], #16 \n" // r40 r41 "shll v14.4s, %21.4h, #16 \n" "fmla v30.4s, v15.4s, v18.4s \n" "shll v20.4s, v20.4h, #16 \n" "fmla v31.4s, v15.4s, v19.4s \n" "shll2 v15.4s, %21.8h, #16 \n" "fmla v28.4s, v14.4s, v19.4s \n" "shll v21.4s, v21.4h, #16 \n" "fmla v29.4s, v14.4s, v20.4s \n" "shll v14.4s, %22.4h, #16 \n" "fmla v30.4s, v15.4s, v20.4s \n" "shll v16.4s, v16.4h, #16 \n" "fmla v31.4s, v15.4s, v21.4s \n" "prfm pldl1keep, [%5, #256] \n" "ld1 {v18.4h, v19.4h, v20.4h, v21.4h}, [%5] \n" // r42 r43 r44 r45 "shll2 v15.4s, %22.8h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "fmla v28.4s, v14.4s, v16.4s \n" "shll v18.4s, v18.4h, #16 \n" "fmla v29.4s, v14.4s, v17.4s \n" "shll v14.4s, %23.4h, #16 \n" "fmla v30.4s, v15.4s, v17.4s \n" "shll v19.4s, v19.4h, #16 \n" "fmla v31.4s, v15.4s, v18.4s \n" "shll2 v15.4s, %23.8h, #16 \n" "fmla v28.4s, v14.4s, v18.4s \n" "shll v20.4s, v20.4h, #16 \n" "fmla v29.4s, v14.4s, v19.4s \n" "shll v14.4s, %24.4h, #16 \n" "fmla v30.4s, v15.4s, v19.4s \n" "shll v21.4s, v21.4h, #16 \n" "fmla v31.4s, v15.4s, v20.4s \n" "fmla v28.4s, v14.4s, v20.4s \n" "fmla v29.4s, v14.4s, v21.4s \n" "fadd v30.4s, v30.4s, v28.4s \n" "fadd v31.4s, v31.4s, v29.4s \n" "shrn v30.4h, v30.4s, #16 \n" "shrn v31.4h, v31.4s, #16 \n" "st1 {v30.4h, v31.4h}, [%0], #16 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2), // %3 "=r"(r3), // %4 "=r"(r4) // %5 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "4"(r3), "5"(r4), "w"(_k00_01), // %12 "w"(_k02_03), // %13 "w"(_k04_10), // %14 "w"(_k11_12), // %15 "w"(_k13_14), // %16 "w"(_k20_21), // %17 "w"(_k22_23), // %18 "w"(_k24_30), // %19 "w"(_k31_32), // %20 "w"(_k33_34), // %21 "w"(_k40_41), // %22 "w"(_k42_43), // %23 "w"(_k44), // %24 "w"(_bias0) // %25 : "memory", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); #else // __aarch64__ asm volatile( "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :64]! \n" "pld [%2, #128] \n" "vld1.u16 {d2-d3}, [%2 :64]! 
\n" // r00 r01 "vshll.u16 q8, d20, #16 \n" // k00 "pld [%1, #128] \n" "vld1.f32 {d24-d25}, [%1] \n" "pld [%2, #256] \n" "vld1.u16 {d8-d11}, [%2 :64] \n" // r02 r03 r04 r05 "vshll.u16 q0, d2, #16 \n" "vmov q13, q12 \n" // sum0 sum1 "vshll.u16 q1, d3, #16 \n" "vshll.u16 q9, d21, #16 \n" // k01 "vmul.f32 q14, q8, q0 \n" "vshll.u16 q2, d8, #16 \n" "vmul.f32 q15, q8, q1 \n" "vshll.u16 q10, d22, #16 \n" // k02 "vmla.f32 q12, q9, q1 \n" "pld [%3, #128] \n" "vld1.u16 {d2-d3}, [%3 :64]! \n" // r10 r11 "vshll.u16 q3, d9, #16 \n" "vmla.f32 q13, q9, q2 \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :64]! \n" "vshll.u16 q11, d23, #16 \n" // k03 "vmla.f32 q14, q10, q2 \n" "vshll.u16 q4, d10, #16 \n" "vmla.f32 q15, q10, q3 \n" "vshll.u16 q10, d16, #16 \n" // k04 "vmla.f32 q12, q11, q3 \n" "vshll.u16 q5, d11, #16 \n" "vmla.f32 q13, q11, q4 \n" "vshll.u16 q11, d17, #16 \n" // k10 "vmla.f32 q14, q10, q4 \n" "vshll.u16 q0, d2, #16 \n" "vmla.f32 q15, q10, q5 \n" "pld [%3, #256] \n" "vld1.u16 {d8-d11}, [%3 :64] \n" // r12 r13 r14 r15 "vshll.u16 q1, d3, #16 \n" "vshll.u16 q8, d18, #16 \n" // k11 "vmla.f32 q12, q11, q0 \n" "vshll.u16 q2, d8, #16 \n" "vmla.f32 q13, q11, q1 \n" "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :64]! \n" "vshll.u16 q9, d19, #16 \n" // k12 "vmla.f32 q14, q8, q1 \n" "pld [%4, #128] \n" "vld1.u16 {d2-d3}, [%4 :64]! \n" // r20 r21 "vshll.u16 q3, d9, #16 \n" "vmla.f32 q15, q8, q2 \n" "vshll.u16 q8, d20, #16 \n" // k13 "vmla.f32 q12, q9, q2 \n" "vshll.u16 q4, d10, #16 \n" "vmla.f32 q13, q9, q3 \n" "vshll.u16 q9, d21, #16 \n" // k14 "vmla.f32 q14, q8, q3 \n" "vshll.u16 q5, d11, #16 \n" "vmla.f32 q15, q8, q4 \n" "vshll.u16 q10, d22, #16 \n" // k20 "vmla.f32 q12, q9, q4 \n" "vshll.u16 q0, d2, #16 \n" "vmla.f32 q13, q9, q5 \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :64]! \n" "pld [%4, #256] \n" "vld1.u16 {d8-d11}, [%4 :64] \n" // r22 r23 r24 r25 "vshll.u16 q1, d3, #16 \n" "vshll.u16 q11, d23, #16 \n" // k21 "vmla.f32 q14, q10, q0 \n" "vshll.u16 q2, d8, #16 \n" "vmla.f32 q15, q10, q1 \n" "vshll.u16 q10, d16, #16 \n" // k22 "vmla.f32 q12, q11, q1 \n" "pld [%5, #128] \n" "vld1.u16 {d2-d3}, [%5 :64]! \n" // r30 r31 "vshll.u16 q3, d9, #16 \n" "vmla.f32 q13, q11, q2 \n" "vshll.u16 q11, d17, #16 \n" // k23 "vmla.f32 q14, q10, q2 \n" "vshll.u16 q4, d10, #16 \n" "vmla.f32 q15, q10, q3 \n" "vshll.u16 q8, d18, #16 \n" // k24 "vmla.f32 q12, q11, q3 \n" "vshll.u16 q5, d11, #16 \n" "vmla.f32 q13, q11, q4 \n" "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :64]! \n" "vshll.u16 q9, d19, #16 \n" // k30 "vmla.f32 q14, q8, q4 \n" "vshll.u16 q0, d2, #16 \n" "vmla.f32 q15, q8, q5 \n" "pld [%5, #256] \n" "vld1.u16 {d8-d11}, [%5 :64] \n" // r32 r33 r34 r35 "vshll.u16 q1, d3, #16 \n" "vshll.u16 q8, d20, #16 \n" // k31 "vmla.f32 q12, q9, q0 \n" "vshll.u16 q2, d8, #16 \n" "vmla.f32 q13, q9, q1 \n" "vshll.u16 q9, d21, #16 \n" // k32 "vmla.f32 q14, q8, q1 \n" "pld [%6, #128] \n" "vld1.u16 {d2-d3}, [%6 :64]! \n" // r40 r41 "vshll.u16 q3, d9, #16 \n" "vmla.f32 q15, q8, q2 \n" "vshll.u16 q10, d22, #16 \n" // k33 "vmla.f32 q12, q9, q2 \n" "vshll.u16 q4, d10, #16 \n" "vmla.f32 q13, q9, q3 \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :64]! 
\n" "vshll.u16 q11, d23, #16 \n" // k34 "vmla.f32 q14, q10, q3 \n" "vshll.u16 q5, d11, #16 \n" "vmla.f32 q15, q10, q4 \n" "vshll.u16 q10, d16, #16 \n" // k40 "vmla.f32 q12, q11, q4 \n" "vshll.u16 q0, d2, #16 \n" "vmla.f32 q13, q11, q5 \n" "pld [%6, #256] \n" "vld1.u16 {d8-d11}, [%6 :64] \n" // r42 r43 r44 r45 "vshll.u16 q1, d3, #16 \n" "vshll.u16 q11, d17, #16 \n" // k41 "vmla.f32 q14, q10, q0 \n" "vshll.u16 q2, d8, #16 \n" "vmla.f32 q15, q10, q1 \n" "vshll.u16 q8, d18, #16 \n" // k42 "vmla.f32 q12, q11, q1 \n" "vshll.u16 q3, d9, #16 \n" "vmla.f32 q13, q11, q2 \n" "pld [%7, #64] \n" "vld1.u16 {d20}, [%7 :64] \n" "vshll.u16 q9, d19, #16 \n" // k43 "vmla.f32 q14, q8, q2 \n" "vshll.u16 q4, d10, #16 \n" "vmla.f32 q15, q8, q3 \n" "vshll.u16 q8, d20, #16 \n" // k44 "vmla.f32 q12, q9, q3 \n" "vmla.f32 q13, q9, q4 \n" "vshll.u16 q5, d11, #16 \n" "vmla.f32 q14, q8, q4 \n" "vmla.f32 q15, q8, q5 \n" "vadd.f32 q12, q12, q14 \n" "vadd.f32 q13, q13, q15 \n" "sub %7, %7, #192 \n" // kptr -= 24 * 4; "vshrn.u32 d24, q12, #16 \n" "vshrn.u32 d25, q13, #16 \n" "vst1.u16 {d24-d25}, [%0 :64]! \n" : "=r"(outptr0), // %0 "=r"(bias0_data_ptr), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(r3), // %5 "=r"(r4), // %6 "=r"(kptr) // %7 : "0"(outptr0), "1"(bias0_data_ptr), "2"(r0), "3"(r1), "4"(r2), "5"(r3), "6"(r4), "7"(kptr) : "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); #endif // __aarch64__ } for (; j < outw; j++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%1, #64] \n" "ld1 {v16.4h}, [%1], #8 \n" // r00 "prfm pldl1keep, [%1, #256] \n" "ld1 {v17.4h, v18.4h, v19.4h, v20.4h}, [%1] \n" // r01 r02 r03 r04 "shll v14.4s, %12.4h, #16 \n" "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "shll v18.4s, v18.4h, #16 \n" "shll v19.4s, v19.4h, #16 \n" "shll v20.4s, v20.4h, #16 \n" "mov v31.16b, %25.16b \n" // sum01 "shll2 v15.4s, %12.8h, #16 \n" "fmul v28.4s, v14.4s, v16.4s \n" "shll v14.4s, %13.4h, #16 \n" "fmul v29.4s, v15.4s, v17.4s \n" "shll2 v15.4s, %13.8h, #16 \n" "fmul v30.4s, v14.4s, v18.4s \n" "shll v14.4s, %14.4h, #16 \n" "fmla v31.4s, v15.4s, v19.4s \n" "shll2 v15.4s, %14.8h, #16 \n" "fmla v28.4s, v14.4s, v20.4s \n" "prfm pldl1keep, [%2, #64] \n" "ld1 {v16.4h}, [%2], #8 \n" // r10 "prfm pldl1keep, [%2, #256] \n" "ld1 {v17.4h, v18.4h, v19.4h, v20.4h}, [%2] \n" // r11 r12 r13 r14 "shll v14.4s, %15.4h, #16 \n" "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "shll v18.4s, v18.4h, #16 \n" "shll v19.4s, v19.4h, #16 \n" "shll v20.4s, v20.4h, #16 \n" "fmla v29.4s, v15.4s, v16.4s \n" "shll2 v15.4s, %15.8h, #16 \n" "fmla v30.4s, v14.4s, v17.4s \n" "shll v14.4s, %16.4h, #16 \n" "fmla v31.4s, v15.4s, v18.4s \n" "shll2 v15.4s, %16.8h, #16 \n" "fmla v28.4s, v14.4s, v19.4s \n" "shll v14.4s, %17.4h, #16 \n" "fmla v29.4s, v15.4s, v20.4s \n" "prfm pldl1keep, [%3, #64] \n" "ld1 {v16.4h}, [%3], #8 \n" // r20 "prfm pldl1keep, [%3, #256] \n" "ld1 {v17.4h, v18.4h, v19.4h, v20.4h}, [%3] \n" // r21 r22 r23 r24 "shll2 v15.4s, %17.8h, #16 \n" "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "shll v18.4s, v18.4h, #16 \n" "shll v19.4s, v19.4h, #16 \n" "shll v20.4s, v20.4h, #16 \n" "fmla v30.4s, v14.4s, v16.4s \n" "shll v14.4s, %18.4h, #16 \n" "fmla v31.4s, v15.4s, v17.4s \n" "shll2 v15.4s, %18.8h, #16 \n" "fmla v28.4s, v14.4s, v18.4s \n" "shll v14.4s, %19.4h, #16 \n" "fmla v29.4s, v15.4s, v19.4s \n" "shll2 v15.4s, %19.8h, #16 \n" "fmla v30.4s, v14.4s, v20.4s \n" "prfm pldl1keep, [%4, #64] \n" "ld1 {v16.4h}, [%4], #8 \n" // r30 
"prfm pldl1keep, [%4, #256] \n" "ld1 {v17.4h, v18.4h, v19.4h, v20.4h}, [%4] \n" // r31 r32 r33 r34 "shll v14.4s, %20.4h, #16 \n" "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "shll v18.4s, v18.4h, #16 \n" "shll v19.4s, v19.4h, #16 \n" "shll v20.4s, v20.4h, #16 \n" "fmla v31.4s, v15.4s, v16.4s \n" "shll2 v15.4s, %20.8h, #16 \n" "fmla v28.4s, v14.4s, v17.4s \n" "shll v14.4s, %21.4h, #16 \n" "fmla v29.4s, v15.4s, v18.4s \n" "shll2 v15.4s, %21.8h, #16 \n" "fmla v30.4s, v14.4s, v19.4s \n" "shll v14.4s, %22.4h, #16 \n" "fmla v31.4s, v15.4s, v20.4s \n" "prfm pldl1keep, [%5, #64] \n" "ld1 {v16.4h}, [%5], #8 \n" // r40 "prfm pldl1keep, [%5, #256] \n" "ld1 {v17.4h, v18.4h, v19.4h, v20.4h}, [%5] \n" // r41 r42 r43 r44 "shll2 v15.4s, %22.8h, #16 \n" "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "shll v18.4s, v18.4h, #16 \n" "shll v19.4s, v19.4h, #16 \n" "shll v20.4s, v20.4h, #16 \n" "fmla v28.4s, v14.4s, v16.4s \n" "shll v14.4s, %23.4h, #16 \n" "fmla v29.4s, v15.4s, v17.4s \n" "shll2 v15.4s, %23.8h, #16 \n" "fmla v30.4s, v14.4s, v18.4s \n" "shll v14.4s, %24.4h, #16 \n" "fmla v31.4s, v15.4s, v19.4s \n" "fmla v28.4s, v14.4s, v20.4s \n" "fadd v29.4s, v29.4s, v30.4s \n" "fadd v31.4s, v31.4s, v28.4s \n" "fadd v31.4s, v31.4s, v29.4s \n" "shrn v31.4h, v31.4s, #16 \n" "st1 {v31.4h}, [%0], #8 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2), // %3 "=r"(r3), // %4 "=r"(r4) // %5 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "4"(r3), "5"(r4), "w"(_k00_01), // %12 "w"(_k02_03), // %13 "w"(_k04_10), // %14 "w"(_k11_12), // %15 "w"(_k13_14), // %16 "w"(_k20_21), // %17 "w"(_k22_23), // %18 "w"(_k24_30), // %19 "w"(_k31_32), // %20 "w"(_k33_34), // %21 "w"(_k40_41), // %22 "w"(_k42_43), // %23 "w"(_k44), // %24 "w"(_bias0) // %25 : "memory", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); #else // __aarch64__ asm volatile( "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :64]! \n" "pld [%1, #128] \n" "vld1.f32 {d24-d25}, [%1] \n" // sum0 "pld [%2, #64] \n" "vld1.u16 {d1}, [%2 :64]! \n" // r00 "vshll.u16 q8, d20, #16 \n" // k00 "pld [%2, #256] \n" "vld1.u16 {d6-d9}, [%2 :64] \n" // r01 r02 r03 r04 "vshll.u16 q0, d1, #16 \n" "vshll.u16 q9, d21, #16 \n" // k01 "vshll.u16 q1, d6, #16 \n" "vmul.f32 q13, q8, q0 \n" "pld [%3, #64] \n" "vld1.u16 {d1}, [%3 :64]! \n" // r10 "vshll.u16 q2, d7, #16 \n" "vshll.u16 q10, d22, #16 \n" // k02 "vmul.f32 q14, q9, q1 \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :64]! \n" "vshll.u16 q3, d8, #16 \n" "vshll.u16 q11, d23, #16 \n" // k03 "vmul.f32 q15, q10, q2 \n" "vshll.u16 q4, d9, #16 \n" "vshll.u16 q10, d16, #16 \n" // k04 "vmla.f32 q12, q11, q3 \n" "vshll.u16 q0, d1, #16 \n" "vshll.u16 q11, d17, #16 \n" // k10 "vmla.f32 q13, q10, q4 \n" "pld [%3, #256] \n" "vld1.u16 {d6-d9}, [%3 :64] \n" // r11 r12 r13 r14 "vshll.u16 q8, d18, #16 \n" // k11 "vshll.u16 q1, d6, #16 \n" "vmla.f32 q14, q11, q0 \n" "pld [%4, #64] \n" "vld1.u16 {d1}, [%4 :64]! \n" // r20 "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :64]! \n" "vshll.u16 q2, d7, #16 \n" "vshll.u16 q9, d19, #16 \n" // k12 "vmla.f32 q15, q8, q1 \n" "vshll.u16 q3, d8, #16 \n" "vshll.u16 q8, d20, #16 \n" // k13 "vmla.f32 q12, q9, q2 \n" "vshll.u16 q4, d9, #16 \n" "vshll.u16 q9, d21, #16 \n" // k14 "vmla.f32 q13, q8, q3 \n" "vshll.u16 q0, d1, #16 \n" "vshll.u16 q10, d22, #16 \n" // k20 "vmla.f32 q14, q9, q4 \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :64]! 
\n" "pld [%4, #256] \n" "vld1.u16 {d6-d9}, [%4 :64] \n" // r21 r22 r23 r24 "vshll.u16 q11, d23, #16 \n" // k21 "vshll.u16 q1, d6, #16 \n" "vmla.f32 q15, q10, q0 \n" "pld [%5, #64] \n" "vld1.u16 {d1}, [%5 :64]! \n" // r30 "vshll.u16 q2, d7, #16 \n" "vshll.u16 q10, d16, #16 \n" // k22 "vmla.f32 q12, q11, q1 \n" "vshll.u16 q3, d8, #16 \n" "vshll.u16 q11, d17, #16 \n" // k23 "vmla.f32 q13, q10, q2 \n" "vshll.u16 q4, d9, #16 \n" "vshll.u16 q8, d18, #16 \n" // k24 "vmla.f32 q14, q11, q3 \n" "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :64]! \n" "vshll.u16 q0, d1, #16 \n" "vshll.u16 q9, d19, #16 \n" // k30 "vmla.f32 q15, q8, q4 \n" "pld [%5, #256] \n" "vld1.u16 {d6-d9}, [%5 :64] \n" // r31 r32 r33 r34 "vshll.u16 q8, d20, #16 \n" // k31 "vshll.u16 q1, d6, #16 \n" "vmla.f32 q12, q9, q0 \n" "pld [%6, #64] \n" "vld1.u16 {d1}, [%6 :64]! \n" // r40 "vshll.u16 q2, d7, #16 \n" "vshll.u16 q9, d21, #16 \n" // k32 "vmla.f32 q13, q8, q1 \n" "vshll.u16 q3, d8, #16 \n" "vshll.u16 q10, d22, #16 \n" // k33 "vmla.f32 q14, q9, q2 \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :64]! \n" "vshll.u16 q4, d9, #16 \n" "vshll.u16 q11, d23, #16 \n" // k34 "vmla.f32 q15, q10, q3 \n" "vshll.u16 q0, d1, #16 \n" "vshll.u16 q10, d16, #16 \n" // k40 "vmla.f32 q12, q11, q4 \n" "pld [%6, #256] \n" "vld1.u16 {d6-d9}, [%6 :64] \n" // r41 r42 r43 r44 "vshll.u16 q11, d17, #16 \n" // k41 "vshll.u16 q1, d6, #16 \n" "vmla.f32 q13, q10, q0 \n" "vshll.u16 q2, d7, #16 \n" "vshll.u16 q8, d18, #16 \n" // k42 "vmla.f32 q14, q11, q1 \n" "pld [%7, #64] \n" "vld1.u16 {d20}, [%7 :64] \n" "vshll.u16 q3, d8, #16 \n" "vshll.u16 q9, d19, #16 \n" // k43 "vmla.f32 q15, q8, q2 \n" "vshll.u16 q4, d9, #16 \n" "vshll.u16 q8, d20, #16 \n" // k44 "vmla.f32 q12, q9, q3 \n" "vmla.f32 q13, q8, q4 \n" "vadd.f32 q14, q14, q15 \n" "vadd.f32 q12, q12, q13 \n" "vadd.f32 q12, q12, q14 \n" "sub %7, %7, #192 \n" // kptr -= 24 * 4; "vshrn.u32 d24, q12, #16 \n" "vst1.u16 {d24}, [%0 :64]! \n" : "=r"(outptr0), // %0 "=r"(bias0_data_ptr), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(r3), // %5 "=r"(r4), // %6 "=r"(kptr) // %7 : "0"(outptr0), "1"(bias0_data_ptr), "2"(r0), "3"(r1), "4"(r2), "5"(r3), "6"(r4), "7"(kptr) : "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); #endif // __aarch64__ } r0 += 4 * 4; r1 += 4 * 4; r2 += 4 * 4; r3 += 4 * 4; r4 += 4 * 4; } } } static void convdw5x5s2_pack4_bf16s_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int outw = top_blob.w; int outh = top_blob.h; const int group = bottom_blob.c; const int tailstep = (w - 2 * outw + w) * 4; const float* bias = _bias; #pragma omp parallel for num_threads(opt.num_threads) for (int g = 0; g < group; g++) { Mat out = top_blob.channel(g); #if __aarch64__ float32x4_t _bias0 = bias ? 
vld1q_f32((const float*)bias + g * 4) : vdupq_n_f32(0.f); #endif // __aarch64__ const unsigned short* kptr = kernel.row<const unsigned short>(g); unsigned short* outptr0 = out; const Mat img0 = bottom_blob.channel(g); const unsigned short* r0 = img0.row<const unsigned short>(0); const unsigned short* r1 = img0.row<const unsigned short>(1); const unsigned short* r2 = img0.row<const unsigned short>(2); const unsigned short* r3 = img0.row<const unsigned short>(3); const unsigned short* r4 = img0.row<const unsigned short>(4); #if __aarch64__ // 4 * 25 uint16x8_t _k00_01 = vld1q_u16(kptr); uint16x8_t _k02_03 = vld1q_u16(kptr + 8); uint16x8_t _k04_10 = vld1q_u16(kptr + 16); uint16x8_t _k11_12 = vld1q_u16(kptr + 24); uint16x8_t _k13_14 = vld1q_u16(kptr + 32); uint16x8_t _k20_21 = vld1q_u16(kptr + 40); uint16x8_t _k22_23 = vld1q_u16(kptr + 48); uint16x8_t _k24_30 = vld1q_u16(kptr + 56); uint16x8_t _k31_32 = vld1q_u16(kptr + 64); uint16x8_t _k33_34 = vld1q_u16(kptr + 72); uint16x8_t _k40_41 = vld1q_u16(kptr + 80); uint16x8_t _k42_43 = vld1q_u16(kptr + 88); uint16x4_t _k44 = vld1_u16(kptr + 96); #else // __aarch64__ float bias0_data[4]; if (bias) { bias0_data[0] = bias[g * 4 + 0]; bias0_data[1] = bias[g * 4 + 1]; bias0_data[2] = bias[g * 4 + 2]; bias0_data[3] = bias[g * 4 + 3]; } else { bias0_data[0] = 0.f; bias0_data[1] = 0.f; bias0_data[2] = 0.f; bias0_data[3] = 0.f; } const float* bias0_data_ptr = bias0_data; #endif // __aarch64__ int i = 0; for (; i < outh; i++) { int j = 0; for (; j + 3 < outw; j += 4) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%1, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%1], #32 \n" // r00 r01 r02 r03 "prfm pldl1keep, [%1, #256] \n" "ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%1], #32 \n" // r04 r05 r06 r07 "shll v14.4s, %12.4h, #16 \n" "mov v28.16b, %25.16b \n" // sum00 "mov v29.16b, %25.16b \n" // sum01 "mov v30.16b, %25.16b \n" // sum02 "mov v31.16b, %25.16b \n" // sum03 "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "shll v18.4s, v18.4h, #16 \n" "prfm pldl1keep, [%1, #192] \n" "ld1 {v24.4h, v25.4h, v26.4h}, [%1] \n" // r08 r09 r010 "shll2 v15.4s, %12.8h, #16 \n" "fmla v28.4s, v14.4s, v16.4s \n" "shll v20.4s, v20.4h, #16 \n" "fmla v29.4s, v14.4s, v18.4s \n" "shll v22.4s, v22.4h, #16 \n" "fmla v30.4s, v14.4s, v20.4s \n" "shll v19.4s, v19.4h, #16 \n" "fmla v31.4s, v14.4s, v22.4s \n" "shll v14.4s, %13.4h, #16 \n" "fmla v28.4s, v15.4s, v17.4s \n" "shll v21.4s, v21.4h, #16 \n" "fmla v29.4s, v15.4s, v19.4s \n" "shll v23.4s, v23.4h, #16 \n" "fmla v30.4s, v15.4s, v21.4s \n" "fmla v31.4s, v15.4s, v23.4s \n" "shll2 v15.4s, %13.8h, #16 \n" "fmla v28.4s, v14.4s, v18.4s \n" "fmla v29.4s, v14.4s, v20.4s \n" "shll v24.4s, v24.4h, #16 \n" "fmla v30.4s, v14.4s, v22.4s \n" "fmla v31.4s, v14.4s, v24.4s \n" "shll v14.4s, %14.4h, #16 \n" "fmla v28.4s, v15.4s, v19.4s \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%2], #32 \n" // r10 r11 r12 r13 "fmla v29.4s, v15.4s, v21.4s \n" "shll v25.4s, v25.4h, #16 \n" "fmla v30.4s, v15.4s, v23.4s \n" "shll v16.4s, v16.4h, #16 \n" "fmla v31.4s, v15.4s, v25.4s \n" "shll2 v15.4s, %14.8h, #16 \n" "fmla v28.4s, v14.4s, v20.4s \n" "shll v17.4s, v17.4h, #16 \n" "fmla v29.4s, v14.4s, v22.4s \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%2], #32 \n" // r14 r15 r16 r17 "shll v26.4s, v26.4h, #16 \n" "fmla v30.4s, v14.4s, v24.4s \n" "shll v18.4s, v18.4h, #16 \n" "fmla v31.4s, v14.4s, v26.4s \n" "prfm pldl1keep, [%2, #192] \n" "ld1 {v24.4h, v25.4h, v26.4h}, [%2] \n" // r18 r19 r110 
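                    // stride-2 column step: one widened kernel tap multiplies every other
                    // input column, e.g. tap k10 (high half of %14) pairs with r10/r12/r14/r16
                    // to produce the four output columns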
"shll v14.4s, %15.4h, #16 \n" "fmla v28.4s, v15.4s, v16.4s \n" "shll v20.4s, v20.4h, #16 \n" "fmla v29.4s, v15.4s, v18.4s \n" "shll v22.4s, v22.4h, #16 \n" "fmla v30.4s, v15.4s, v20.4s \n" "shll v19.4s, v19.4h, #16 \n" "fmla v31.4s, v15.4s, v22.4s \n" "shll2 v15.4s, %15.8h, #16 \n" "fmla v28.4s, v14.4s, v17.4s \n" "shll v21.4s, v21.4h, #16 \n" "fmla v29.4s, v14.4s, v19.4s \n" "shll v23.4s, v23.4h, #16 \n" "fmla v30.4s, v14.4s, v21.4s \n" "fmla v31.4s, v14.4s, v23.4s \n" "shll v14.4s, %16.4h, #16 \n" "fmla v28.4s, v15.4s, v18.4s \n" "fmla v29.4s, v15.4s, v20.4s \n" "shll v24.4s, v24.4h, #16 \n" "fmla v30.4s, v15.4s, v22.4s \n" "fmla v31.4s, v15.4s, v24.4s \n" "shll2 v15.4s, %16.8h, #16 \n" "fmla v28.4s, v14.4s, v19.4s \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%3], #32 \n" // r20 r21 r22 r23 "fmla v29.4s, v14.4s, v21.4s \n" "shll v25.4s, v25.4h, #16 \n" "fmla v30.4s, v14.4s, v23.4s \n" "shll v16.4s, v16.4h, #16 \n" "fmla v31.4s, v14.4s, v25.4s \n" "shll v14.4s, %17.4h, #16 \n" "fmla v28.4s, v15.4s, v20.4s \n" "shll v17.4s, v17.4h, #16 \n" "fmla v29.4s, v15.4s, v22.4s \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%3], #32 \n" // r24 r25 r26 r27 "shll v26.4s, v26.4h, #16 \n" "fmla v30.4s, v15.4s, v24.4s \n" "shll v18.4s, v18.4h, #16 \n" "fmla v31.4s, v15.4s, v26.4s \n" "prfm pldl1keep, [%3, #192] \n" "ld1 {v24.4h, v25.4h, v26.4h}, [%3] \n" // r28 r29 r210 "shll2 v15.4s, %17.8h, #16 \n" "fmla v28.4s, v14.4s, v16.4s \n" "shll v20.4s, v20.4h, #16 \n" "fmla v29.4s, v14.4s, v18.4s \n" "shll v22.4s, v22.4h, #16 \n" "fmla v30.4s, v14.4s, v20.4s \n" "shll v19.4s, v19.4h, #16 \n" "fmla v31.4s, v14.4s, v22.4s \n" "shll v14.4s, %18.4h, #16 \n" "fmla v28.4s, v15.4s, v17.4s \n" "shll v21.4s, v21.4h, #16 \n" "fmla v29.4s, v15.4s, v19.4s \n" "shll v23.4s, v23.4h, #16 \n" "fmla v30.4s, v15.4s, v21.4s \n" "fmla v31.4s, v15.4s, v23.4s \n" "shll2 v15.4s, %18.8h, #16 \n" "fmla v28.4s, v14.4s, v18.4s \n" "fmla v29.4s, v14.4s, v20.4s \n" "shll v24.4s, v24.4h, #16 \n" "fmla v30.4s, v14.4s, v22.4s \n" "fmla v31.4s, v14.4s, v24.4s \n" "shll v14.4s, %19.4h, #16 \n" "fmla v28.4s, v15.4s, v19.4s \n" "prfm pldl1keep, [%4, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%4], #32 \n" // r30 r31 r32 r33 "fmla v29.4s, v15.4s, v21.4s \n" "shll v25.4s, v25.4h, #16 \n" "fmla v30.4s, v15.4s, v23.4s \n" "shll v16.4s, v16.4h, #16 \n" "fmla v31.4s, v15.4s, v25.4s \n" "shll2 v15.4s, %19.8h, #16 \n" "fmla v28.4s, v14.4s, v20.4s \n" "shll v17.4s, v17.4h, #16 \n" "fmla v29.4s, v14.4s, v22.4s \n" "prfm pldl1keep, [%4, #256] \n" "ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%4], #32 \n" // r34 r35 r36 r37 "shll v26.4s, v26.4h, #16 \n" "fmla v30.4s, v14.4s, v24.4s \n" "shll v18.4s, v18.4h, #16 \n" "fmla v31.4s, v14.4s, v26.4s \n" "prfm pldl1keep, [%4, #192] \n" "ld1 {v24.4h, v25.4h, v26.4h}, [%4] \n" // r38 r39 r310 "shll v14.4s, %20.4h, #16 \n" "fmla v28.4s, v15.4s, v16.4s \n" "shll v20.4s, v20.4h, #16 \n" "fmla v29.4s, v15.4s, v18.4s \n" "shll v22.4s, v22.4h, #16 \n" "fmla v30.4s, v15.4s, v20.4s \n" "shll v19.4s, v19.4h, #16 \n" "fmla v31.4s, v15.4s, v22.4s \n" "shll2 v15.4s, %20.8h, #16 \n" "fmla v28.4s, v14.4s, v17.4s \n" "shll v21.4s, v21.4h, #16 \n" "fmla v29.4s, v14.4s, v19.4s \n" "shll v23.4s, v23.4h, #16 \n" "fmla v30.4s, v14.4s, v21.4s \n" "fmla v31.4s, v14.4s, v23.4s \n" "shll v14.4s, %21.4h, #16 \n" "fmla v28.4s, v15.4s, v18.4s \n" "fmla v29.4s, v15.4s, v20.4s \n" "shll v24.4s, v24.4h, #16 \n" "fmla v30.4s, v15.4s, v22.4s \n" "fmla v31.4s, v15.4s, v24.4s \n" "shll2 
v15.4s, %21.8h, #16 \n" "fmla v28.4s, v14.4s, v19.4s \n" "prfm pldl1keep, [%5, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%5], #32 \n" // r40 r41 r42 r43 "fmla v29.4s, v14.4s, v21.4s \n" "shll v25.4s, v25.4h, #16 \n" "fmla v30.4s, v14.4s, v23.4s \n" "shll v16.4s, v16.4h, #16 \n" "fmla v31.4s, v14.4s, v25.4s \n" "shll v14.4s, %22.4h, #16 \n" "fmla v28.4s, v15.4s, v20.4s \n" "shll v17.4s, v17.4h, #16 \n" "fmla v29.4s, v15.4s, v22.4s \n" "prfm pldl1keep, [%5, #256] \n" "ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%5], #32 \n" // r44 r45 r46 r47 "shll v26.4s, v26.4h, #16 \n" "fmla v30.4s, v15.4s, v24.4s \n" "shll v18.4s, v18.4h, #16 \n" "fmla v31.4s, v15.4s, v26.4s \n" "prfm pldl1keep, [%5, #192] \n" "ld1 {v24.4h, v25.4h, v26.4h}, [%5] \n" // r48 r49 r410 "shll2 v15.4s, %22.8h, #16 \n" "fmla v28.4s, v14.4s, v16.4s \n" "shll v20.4s, v20.4h, #16 \n" "fmla v29.4s, v14.4s, v18.4s \n" "shll v22.4s, v22.4h, #16 \n" "fmla v30.4s, v14.4s, v20.4s \n" "shll v19.4s, v19.4h, #16 \n" "fmla v31.4s, v14.4s, v22.4s \n" "shll v14.4s, %23.4h, #16 \n" "fmla v28.4s, v15.4s, v17.4s \n" "shll v21.4s, v21.4h, #16 \n" "fmla v29.4s, v15.4s, v19.4s \n" "shll v23.4s, v23.4h, #16 \n" "fmla v30.4s, v15.4s, v21.4s \n" "fmla v31.4s, v15.4s, v23.4s \n" "shll2 v15.4s, %23.8h, #16 \n" "fmla v28.4s, v14.4s, v18.4s \n" "fmla v29.4s, v14.4s, v20.4s \n" "shll v24.4s, v24.4h, #16 \n" "fmla v30.4s, v14.4s, v22.4s \n" "fmla v31.4s, v14.4s, v24.4s \n" "shll v14.4s, %24.4h, #16 \n" "fmla v28.4s, v15.4s, v19.4s \n" "fmla v29.4s, v15.4s, v21.4s \n" "shll v25.4s, v25.4h, #16 \n" "fmla v30.4s, v15.4s, v23.4s \n" "fmla v31.4s, v15.4s, v25.4s \n" "fmla v28.4s, v14.4s, v20.4s \n" "fmla v29.4s, v14.4s, v22.4s \n" "shll v26.4s, v26.4h, #16 \n" "fmla v30.4s, v14.4s, v24.4s \n" "fmla v31.4s, v14.4s, v26.4s \n" "shrn v28.4h, v28.4s, #16 \n" "shrn v29.4h, v29.4s, #16 \n" "shrn v30.4h, v30.4s, #16 \n" "shrn v31.4h, v31.4s, #16 \n" "st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%0], #32 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2), // %3 "=r"(r3), // %4 "=r"(r4) // %5 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "4"(r3), "5"(r4), "w"(_k00_01), // %12 "w"(_k02_03), // %13 "w"(_k04_10), // %14 "w"(_k11_12), // %15 "w"(_k13_14), // %16 "w"(_k20_21), // %17 "w"(_k22_23), // %18 "w"(_k24_30), // %19 "w"(_k31_32), // %20 "w"(_k33_34), // %21 "w"(_k40_41), // %22 "w"(_k42_43), // %23 "w"(_k44), // %24 "w"(_bias0) // %25 : "memory", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); #else // __aarch64__ asm volatile( "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :64]! \n" "pld [%1, #128] \n" "vld1.f32 {d24-d25}, [%1] \n" "vmov q13, q12 \n" // sum0 sum1 "vshll.u16 q8, d20, #16 \n" // k00 "pld [%2, #256] \n" "vld1.u16 {d4-d7}, [%2 :64]! \n" // r00 r01 r02 r03 "vmov q14, q12 \n" "vmov q15, q12 \n" // sum2 sum3 "vshll.u16 q9, d21, #16 \n" // k01 "pld [%2, #256] \n" "vld1.u16 {d12-d15}, [%2 :64]! \n" // r04 r05 r06 r07 "vshll.u16 q0, d4, #16 \n" "vshll.u16 q1, d5, #16 \n" "vshll.u16 q2, d6, #16 \n" "vshll.u16 q3, d7, #16 \n" "vshll.u16 q4, d12, #16 \n" "vshll.u16 q5, d13, #16 \n" "vmla.f32 q12, q8, q0 \n" "vmla.f32 q13, q8, q2 \n" "vshll.u16 q6, d14, #16 \n" "vmla.f32 q14, q8, q4 \n" "vmla.f32 q15, q8, q6 \n" "vshll.u16 q10, d22, #16 \n" // k02 "vmla.f32 q12, q9, q1 \n" "vmla.f32 q13, q9, q3 \n" "vshll.u16 q7, d15, #16 \n" "vmla.f32 q14, q9, q5 \n" "vmla.f32 q15, q9, q7 \n" "pld [%2, #128] \n" "vld1.u16 {d2-d3}, [%2 :64]! 
\n" // r08 r09 "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :64]! \n" "vmla.f32 q12, q10, q2 \n" "vshll.u16 q11, d23, #16 \n" // k03 "vmla.f32 q13, q10, q4 \n" "vshll.u16 q0, d2, #16 \n" "vmla.f32 q14, q10, q6 \n" "vmla.f32 q15, q10, q0 \n" "vshll.u16 q10, d16, #16 \n" // k04 "vmla.f32 q12, q11, q3 \n" "vmla.f32 q13, q11, q5 \n" "vshll.u16 q1, d3, #16 \n" "vmla.f32 q14, q11, q7 \n" "vmla.f32 q15, q11, q1 \n" "pld [%2, #64] \n" "vld1.u16 {d5}, [%2 :64] \n" // r010 "vmla.f32 q12, q10, q4 \n" "vshll.u16 q11, d17, #16 \n" // k10 "vmla.f32 q13, q10, q6 \n" "vshll.u16 q2, d5, #16 \n" "vmla.f32 q14, q10, q0 \n" "pld [%3, #256] \n" "vld1.u16 {d12-d15}, [%3 :64]! \n" // r10 r11 r12 r13 "vmla.f32 q15, q10, q2 \n" "vshll.u16 q8, d18, #16 \n" // k11 "pld [%3, #256] \n" "vld1.u16 {d4-d7}, [%3 :64]! \n" // r14 r15 r16 r17 "vshll.u16 q4, d12, #16 \n" "vshll.u16 q5, d13, #16 \n" "vshll.u16 q6, d14, #16 \n" "vshll.u16 q7, d15, #16 \n" "vshll.u16 q0, d4, #16 \n" "vshll.u16 q1, d5, #16 \n" "vmla.f32 q12, q11, q4 \n" "vmla.f32 q13, q11, q6 \n" "vshll.u16 q2, d6, #16 \n" "vmla.f32 q14, q11, q0 \n" "vmla.f32 q15, q11, q2 \n" "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :64]! \n" "vmla.f32 q12, q8, q5 \n" "vshll.u16 q9, d19, #16 \n" // k12 "vmla.f32 q13, q8, q7 \n" "vshll.u16 q3, d7, #16 \n" "vmla.f32 q14, q8, q1 \n" "vmla.f32 q15, q8, q3 \n" "pld [%3, #128] \n" "vld1.u16 {d10-d11}, [%3 :64]! \n" // r18 r19 "vmla.f32 q12, q9, q6 \n" "vshll.u16 q8, d20, #16 \n" // k13 "vmla.f32 q13, q9, q0 \n" "vshll.u16 q4, d10, #16 \n" "vmla.f32 q14, q9, q2 \n" "vmla.f32 q15, q9, q4 \n" "vshll.u16 q9, d21, #16 \n" // k14 "vmla.f32 q12, q8, q7 \n" "vmla.f32 q13, q8, q1 \n" "vshll.u16 q5, d11, #16 \n" "vmla.f32 q14, q8, q3 \n" "vmla.f32 q15, q8, q5 \n" "pld [%3, #64] \n" "vld1.u16 {d13}, [%3 :64] \n" // r110 "vmla.f32 q12, q9, q0 \n" "vshll.u16 q10, d22, #16 \n" // k20 "vmla.f32 q13, q9, q2 \n" "vshll.u16 q6, d13, #16 \n" "vmla.f32 q14, q9, q4 \n" "pld [%4, #256] \n" "vld1.u16 {d4-d7}, [%4 :64]! \n" // r20 r21 r22 r23 "vmla.f32 q15, q9, q6 \n" "vshll.u16 q11, d23, #16 \n" // k21 "pld [%4, #256] \n" "vld1.u16 {d12-d15}, [%4 :64]! \n" // r24 r25 r26 r27 "vshll.u16 q0, d4, #16 \n" "vshll.u16 q1, d5, #16 \n" "vshll.u16 q2, d6, #16 \n" "vshll.u16 q3, d7, #16 \n" "vshll.u16 q4, d12, #16 \n" "vshll.u16 q5, d13, #16 \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :64]! \n" "vmla.f32 q12, q10, q0 \n" "vmla.f32 q13, q10, q2 \n" "vshll.u16 q6, d14, #16 \n" "vmla.f32 q14, q10, q4 \n" "vmla.f32 q15, q10, q6 \n" "vshll.u16 q10, d16, #16 \n" // k22 "vmla.f32 q12, q11, q1 \n" "vmla.f32 q13, q11, q3 \n" "vshll.u16 q7, d15, #16 \n" "vmla.f32 q14, q11, q5 \n" "vmla.f32 q15, q11, q7 \n" "pld [%4, #128] \n" "vld1.u16 {d2-d3}, [%4 :64]! \n" // r28 r29 "vmla.f32 q12, q10, q2 \n" "vshll.u16 q11, d17, #16 \n" // k23 "vmla.f32 q13, q10, q4 \n" "vshll.u16 q0, d2, #16 \n" "vmla.f32 q14, q10, q6 \n" "vmla.f32 q15, q10, q0 \n" "vshll.u16 q8, d18, #16 \n" // k24 "vmla.f32 q12, q11, q3 \n" "vmla.f32 q13, q11, q5 \n" "vshll.u16 q1, d3, #16 \n" "vmla.f32 q14, q11, q7 \n" "vmla.f32 q15, q11, q1 \n" "pld [%4, #64] \n" "vld1.u16 {d5}, [%4 :64] \n" // r210 "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :64]! \n" "vmla.f32 q12, q8, q4 \n" "vshll.u16 q9, d19, #16 \n" // k30 "vmla.f32 q13, q8, q6 \n" "vshll.u16 q2, d5, #16 \n" "vmla.f32 q14, q8, q0 \n" "pld [%5, #256] \n" "vld1.u16 {d12-d15}, [%5 :64]! \n" // r30 r31 r32 r33 "vmla.f32 q15, q8, q2 \n" "vshll.u16 q8, d20, #16 \n" // k31 "pld [%5, #256] \n" "vld1.u16 {d4-d7}, [%5 :64]! 
\n" // r34 r35 r36 r37 "vshll.u16 q4, d12, #16 \n" "vshll.u16 q5, d13, #16 \n" "vshll.u16 q6, d14, #16 \n" "vshll.u16 q7, d15, #16 \n" "vshll.u16 q0, d4, #16 \n" "vshll.u16 q1, d5, #16 \n" "vmla.f32 q12, q9, q4 \n" "vmla.f32 q13, q9, q6 \n" "vshll.u16 q2, d6, #16 \n" "vmla.f32 q14, q9, q0 \n" "vmla.f32 q15, q9, q2 \n" "vshll.u16 q9, d21, #16 \n" // k32 "vmla.f32 q12, q8, q5 \n" "vmla.f32 q13, q8, q7 \n" "vshll.u16 q3, d7, #16 \n" "vmla.f32 q14, q8, q1 \n" "vmla.f32 q15, q8, q3 \n" "pld [%5, #128] \n" "vld1.u16 {d10-d11}, [%5 :64]! \n" // r38 r39 "vmla.f32 q12, q9, q6 \n" "vshll.u16 q10, d22, #16 \n" // k33 "vmla.f32 q13, q9, q0 \n" "vshll.u16 q4, d10, #16 \n" "vmla.f32 q14, q9, q2 \n" "vmla.f32 q15, q9, q4 \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :64]! \n" "vmla.f32 q12, q10, q7 \n" "vshll.u16 q11, d23, #16 \n" // k34 "vmla.f32 q13, q10, q1 \n" "vshll.u16 q5, d11, #16 \n" "vmla.f32 q14, q10, q3 \n" "vmla.f32 q15, q10, q5 \n" "pld [%5, #64] \n" "vld1.u16 {d13}, [%5 :64] \n" // r310 "vmla.f32 q12, q11, q0 \n" "vshll.u16 q10, d16, #16 \n" // k40 "vmla.f32 q13, q11, q2 \n" "vshll.u16 q6, d13, #16 \n" "vmla.f32 q14, q11, q4 \n" "pld [%6, #256] \n" "vld1.u16 {d4-d7}, [%6 :64]! \n" // r40 r41 r42 r43 "vmla.f32 q15, q11, q6 \n" "vshll.u16 q11, d17, #16 \n" // k41 "pld [%6, #256] \n" "vld1.u16 {d12-d15}, [%6 :64]! \n" // r44 r45 r46 r47 "vshll.u16 q0, d4, #16 \n" "vshll.u16 q1, d5, #16 \n" "vshll.u16 q2, d6, #16 \n" "vshll.u16 q3, d7, #16 \n" "vshll.u16 q4, d12, #16 \n" "vshll.u16 q5, d13, #16 \n" "vmla.f32 q12, q10, q0 \n" "vmla.f32 q13, q10, q2 \n" "vshll.u16 q6, d14, #16 \n" "vmla.f32 q14, q10, q4 \n" "vmla.f32 q15, q10, q6 \n" "vshll.u16 q8, d18, #16 \n" // k42 "vmla.f32 q12, q11, q1 \n" "vmla.f32 q13, q11, q3 \n" "vshll.u16 q7, d15, #16 \n" "vmla.f32 q14, q11, q5 \n" "pld [%7, #64] \n" "vld1.u16 {d20}, [%7 :64] \n" "vmla.f32 q15, q11, q7 \n" "pld [%6, #128] \n" "vld1.u16 {d2-d3}, [%6 :64]! \n" // r48 r49 "vmla.f32 q12, q8, q2 \n" "vshll.u16 q9, d19, #16 \n" // k43 "vmla.f32 q13, q8, q4 \n" "vshll.u16 q0, d2, #16 \n" "vmla.f32 q14, q8, q6 \n" "vmla.f32 q15, q8, q0 \n" "vshll.u16 q8, d20, #16 \n" // k44 "vmla.f32 q12, q9, q3 \n" "vmla.f32 q13, q9, q5 \n" "vshll.u16 q1, d3, #16 \n" "vmla.f32 q14, q9, q7 \n" "vmla.f32 q15, q9, q1 \n" "pld [%6, #64] \n" "vld1.u16 {d5}, [%6 :64] \n" // r410 "vmla.f32 q12, q8, q4 \n" "vmla.f32 q13, q8, q6 \n" "vshll.u16 q2, d5, #16 \n" "vmla.f32 q14, q8, q0 \n" "vmla.f32 q15, q8, q2 \n" "sub %7, %7, #192 \n" // kptr -= 24 * 4; "sub %2, %2, #16 \n" "sub %3, %3, #16 \n" "sub %4, %4, #16 \n" "sub %5, %5, #16 \n" "sub %6, %6, #16 \n" "vshrn.u32 d24, q12, #16 \n" "vshrn.u32 d25, q13, #16 \n" "vshrn.u32 d26, q14, #16 \n" "vshrn.u32 d27, q15, #16 \n" "vst1.u16 {d24-d27}, [%0 :64]! 
\n" : "=r"(outptr0), // %0 "=r"(bias0_data_ptr), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(r3), // %5 "=r"(r4), // %6 "=r"(kptr) // %7 : "0"(outptr0), "1"(bias0_data_ptr), "2"(r0), "3"(r1), "4"(r2), "5"(r3), "6"(r4), "7"(kptr) : "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); #endif // __aarch64__ } for (; j + 1 < outw; j += 2) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%1, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%1], #32 \n" // r00 r01 r02 r03 "prfm pldl1keep, [%1, #192] \n" "ld1 {v20.4h, v21.4h, v22.4h}, [%1] \n" // r04 r05 r06 "shll v14.4s, %12.4h, #16 \n" "shll2 v15.4s, %12.8h, #16 \n" "shll v16.4s, v16.4h, #16 \n" "mov v30.16b, %25.16b \n" // sum00 "mov v31.16b, %25.16b \n" // sum01 "shll v17.4s, v17.4h, #16 \n" "shll v18.4s, v18.4h, #16 \n" "fmul v28.4s, v14.4s, v16.4s \n" "shll v19.4s, v19.4h, #16 \n" "fmul v29.4s, v14.4s, v18.4s \n" "shll v14.4s, %13.4h, #16 \n" "fmla v30.4s, v15.4s, v17.4s \n" "shll v20.4s, v20.4h, #16 \n" "fmla v31.4s, v15.4s, v19.4s \n" "shll2 v15.4s, %13.8h, #16 \n" "fmla v28.4s, v14.4s, v18.4s \n" "shll v21.4s, v21.4h, #16 \n" "fmla v29.4s, v14.4s, v20.4s \n" "shll v14.4s, %14.4h, #16 \n" "fmla v30.4s, v15.4s, v19.4s \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%2], #32 \n" // r10 r11 r12 r13 "shll v22.4s, v22.4h, #16 \n" "fmla v31.4s, v15.4s, v21.4s \n" "shll2 v15.4s, %14.8h, #16 \n" "fmla v28.4s, v14.4s, v20.4s \n" "shll v16.4s, v16.4h, #16 \n" "fmla v29.4s, v14.4s, v22.4s \n" "prfm pldl1keep, [%2, #192] \n" "ld1 {v20.4h, v21.4h, v22.4h}, [%2] \n" // r14 r15 r16 "shll v14.4s, %15.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "shll v18.4s, v18.4h, #16 \n" "fmla v30.4s, v15.4s, v16.4s \n" "shll v19.4s, v19.4h, #16 \n" "fmla v31.4s, v15.4s, v18.4s \n" "shll2 v15.4s, %15.8h, #16 \n" "fmla v28.4s, v14.4s, v17.4s \n" "shll v20.4s, v20.4h, #16 \n" "fmla v29.4s, v14.4s, v19.4s \n" "shll v14.4s, %16.4h, #16 \n" "fmla v30.4s, v15.4s, v18.4s \n" "shll v21.4s, v21.4h, #16 \n" "fmla v31.4s, v15.4s, v20.4s \n" "shll2 v15.4s, %16.8h, #16 \n" "fmla v28.4s, v14.4s, v19.4s \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%3], #32 \n" // r20 r21 r22 r23 "shll v22.4s, v22.4h, #16 \n" "fmla v29.4s, v14.4s, v21.4s \n" "shll v14.4s, %17.4h, #16 \n" "fmla v30.4s, v15.4s, v20.4s \n" "shll v16.4s, v16.4h, #16 \n" "fmla v31.4s, v15.4s, v22.4s \n" "prfm pldl1keep, [%3, #192] \n" "ld1 {v20.4h, v21.4h, v22.4h}, [%3] \n" // r24 r25 r26 "shll2 v15.4s, %17.8h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "shll v18.4s, v18.4h, #16 \n" "fmla v28.4s, v14.4s, v16.4s \n" "shll v19.4s, v19.4h, #16 \n" "fmla v29.4s, v14.4s, v18.4s \n" "shll v14.4s, %18.4h, #16 \n" "fmla v30.4s, v15.4s, v17.4s \n" "shll v20.4s, v20.4h, #16 \n" "fmla v31.4s, v15.4s, v19.4s \n" "shll2 v15.4s, %18.8h, #16 \n" "fmla v28.4s, v14.4s, v18.4s \n" "shll v21.4s, v21.4h, #16 \n" "fmla v29.4s, v14.4s, v20.4s \n" "shll v14.4s, %19.4h, #16 \n" "fmla v30.4s, v15.4s, v19.4s \n" "prfm pldl1keep, [%4, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%4], #32 \n" // r30 r31 r32 r33 "shll v22.4s, v22.4h, #16 \n" "fmla v31.4s, v15.4s, v21.4s \n" "shll2 v15.4s, %19.8h, #16 \n" "fmla v28.4s, v14.4s, v20.4s \n" "shll v16.4s, v16.4h, #16 \n" "fmla v29.4s, v14.4s, v22.4s \n" "prfm pldl1keep, [%4, #192] \n" "ld1 {v20.4h, v21.4h, v22.4h}, [%4] \n" // r34 r35 r36 "shll v14.4s, %20.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "shll v18.4s, v18.4h, #16 \n" "fmla v30.4s, v15.4s, v16.4s \n" "shll v19.4s, 
v19.4h, #16 \n" "fmla v31.4s, v15.4s, v18.4s \n" "shll2 v15.4s, %20.8h, #16 \n" "fmla v28.4s, v14.4s, v17.4s \n" "shll v20.4s, v20.4h, #16 \n" "fmla v29.4s, v14.4s, v19.4s \n" "shll v14.4s, %21.4h, #16 \n" "fmla v30.4s, v15.4s, v18.4s \n" "shll v21.4s, v21.4h, #16 \n" "fmla v31.4s, v15.4s, v20.4s \n" "shll2 v15.4s, %21.8h, #16 \n" "fmla v28.4s, v14.4s, v19.4s \n" "prfm pldl1keep, [%5, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%5], #32 \n" // r40 r41 r42 r43 "shll v22.4s, v22.4h, #16 \n" "fmla v29.4s, v14.4s, v21.4s \n" "shll v14.4s, %22.4h, #16 \n" "fmla v30.4s, v15.4s, v20.4s \n" "shll v16.4s, v16.4h, #16 \n" "fmla v31.4s, v15.4s, v22.4s \n" "prfm pldl1keep, [%5, #192] \n" "ld1 {v20.4h, v21.4h, v22.4h}, [%5] \n" // r44 r45 r46 "shll2 v15.4s, %22.8h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "shll v18.4s, v18.4h, #16 \n" "fmla v28.4s, v14.4s, v16.4s \n" "shll v19.4s, v19.4h, #16 \n" "fmla v29.4s, v14.4s, v18.4s \n" "shll v14.4s, %23.4h, #16 \n" "fmla v30.4s, v15.4s, v17.4s \n" "shll v20.4s, v20.4h, #16 \n" "fmla v31.4s, v15.4s, v19.4s \n" "shll2 v15.4s, %23.8h, #16 \n" "fmla v28.4s, v14.4s, v18.4s \n" "shll v21.4s, v21.4h, #16 \n" "fmla v29.4s, v14.4s, v20.4s \n" "shll v14.4s, %24.4h, #16 \n" "fmla v30.4s, v15.4s, v19.4s \n" "fmla v31.4s, v15.4s, v21.4s \n" "shll v22.4s, v22.4h, #16 \n" "fmla v28.4s, v14.4s, v20.4s \n" "fmla v29.4s, v14.4s, v22.4s \n" "fadd v30.4s, v30.4s, v28.4s \n" "fadd v31.4s, v31.4s, v29.4s \n" "shrn v30.4h, v30.4s, #16 \n" "shrn v31.4h, v31.4s, #16 \n" "st1 {v30.4h, v31.4h}, [%0], #16 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2), // %3 "=r"(r3), // %4 "=r"(r4) // %5 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "4"(r3), "5"(r4), "w"(_k00_01), // %12 "w"(_k02_03), // %13 "w"(_k04_10), // %14 "w"(_k11_12), // %15 "w"(_k13_14), // %16 "w"(_k20_21), // %17 "w"(_k22_23), // %18 "w"(_k24_30), // %19 "w"(_k31_32), // %20 "w"(_k33_34), // %21 "w"(_k40_41), // %22 "w"(_k42_43), // %23 "w"(_k44), // %24 "w"(_bias0) // %25 : "memory", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); #else // __aarch64__ asm volatile( "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :64]! \n" "pld [%1, #128] \n" "vld1.f32 {d24-d25}, [%1] \n" "pld [%2, #256] \n" "vld1.u16 {d4-d7}, [%2 :64]! \n" // r00 r01 r02 r03 "vshll.u16 q8, d20, #16 \n" // k00 "pld [%2, #256] \n" "vld1.u16 {d10-d12}, [%2 :64] \n" // r04 r05 r06 "vmov q13, q12 \n" // sum0 sum1 "vshll.u16 q0, d4, #16 \n" "vshll.u16 q1, d5, #16 \n" "vshll.u16 q2, d6, #16 \n" "vshll.u16 q3, d7, #16 \n" "vshll.u16 q9, d21, #16 \n" // k01 "vmul.f32 q14, q8, q0 \n" "vshll.u16 q4, d10, #16 \n" "vmul.f32 q15, q8, q2 \n" "vshll.u16 q10, d22, #16 \n" // k02 "vmla.f32 q12, q9, q1 \n" "vshll.u16 q5, d11, #16 \n" "vmla.f32 q13, q9, q3 \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :64]! \n" "vmla.f32 q14, q10, q2 \n" "vshll.u16 q11, d23, #16 \n" // k03 "vmla.f32 q15, q10, q4 \n" "vshll.u16 q10, d16, #16 \n" // k04 "vmla.f32 q12, q11, q3 \n" "vshll.u16 q6, d12, #16 \n" "vmla.f32 q13, q11, q5 \n" "vshll.u16 q11, d17, #16 \n" // k10 "vmla.f32 q14, q10, q4 \n" "vmla.f32 q15, q10, q6 \n" "pld [%3, #256] \n" "vld1.u16 {d4-d7}, [%3 :64]! 
\n" // r10 r11 r12 r13 "vshll.u16 q8, d18, #16 \n" // k11 "pld [%3, #256] \n" "vld1.u16 {d10-d12}, [%3 :64] \n" // r14 r15 r16 "vshll.u16 q0, d4, #16 \n" "vshll.u16 q1, d5, #16 \n" "vshll.u16 q2, d6, #16 \n" "vshll.u16 q3, d7, #16 \n" "vmla.f32 q12, q11, q0 \n" "vshll.u16 q4, d10, #16 \n" "vmla.f32 q13, q11, q2 \n" "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :64]! \n" "vmla.f32 q14, q8, q1 \n" "vshll.u16 q9, d19, #16 \n" // k12 "vmla.f32 q15, q8, q3 \n" "vshll.u16 q8, d20, #16 \n" // k13 "vmla.f32 q12, q9, q2 \n" "vshll.u16 q5, d11, #16 \n" "vmla.f32 q13, q9, q4 \n" "vshll.u16 q9, d21, #16 \n" // k14 "vmla.f32 q14, q8, q3 \n" "vshll.u16 q6, d12, #16 \n" "vmla.f32 q15, q8, q5 \n" "vshll.u16 q10, d22, #16 \n" // k20 "vmla.f32 q12, q9, q4 \n" "vmla.f32 q13, q9, q6 \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :64]! \n" "pld [%4, #256] \n" "vld1.u16 {d4-d7}, [%4 :64]! \n" // r20 r21 r22 r23 "vshll.u16 q11, d23, #16 \n" // k21 "pld [%4, #256] \n" "vld1.u16 {d10-d12}, [%4 :64] \n" // r24 r25 r26 "vshll.u16 q0, d4, #16 \n" "vshll.u16 q1, d5, #16 \n" "vshll.u16 q2, d6, #16 \n" "vshll.u16 q3, d7, #16 \n" "vmla.f32 q14, q10, q0 \n" "vmla.f32 q15, q10, q2 \n" "vshll.u16 q10, d16, #16 \n" // k22 "vmla.f32 q12, q11, q1 \n" "vshll.u16 q4, d10, #16 \n" "vmla.f32 q13, q11, q3 \n" "vshll.u16 q11, d17, #16 \n" // k23 "vmla.f32 q14, q10, q2 \n" "vshll.u16 q5, d11, #16 \n" "vmla.f32 q15, q10, q4 \n" "vshll.u16 q8, d18, #16 \n" // k24 "vmla.f32 q12, q11, q3 \n" "vshll.u16 q6, d12, #16 \n" "vmla.f32 q13, q11, q5 \n" "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :64]! \n" "vmla.f32 q14, q8, q4 \n" "vshll.u16 q9, d19, #16 \n" // k30 "vmla.f32 q15, q8, q6 \n" "pld [%5, #256] \n" "vld1.u16 {d4-d7}, [%5 :64]! \n" // r30 r31 r32 r33 "vshll.u16 q8, d20, #16 \n" // k31 "pld [%5, #256] \n" "vld1.u16 {d10-d12}, [%5 :64] \n" // r34 r35 r36 "vshll.u16 q0, d4, #16 \n" "vshll.u16 q1, d5, #16 \n" "vshll.u16 q2, d6, #16 \n" "vshll.u16 q3, d7, #16 \n" "vmla.f32 q12, q9, q0 \n" "vshll.u16 q4, d10, #16 \n" "vmla.f32 q13, q9, q2 \n" "vshll.u16 q9, d21, #16 \n" // k32 "vmla.f32 q14, q8, q1 \n" "vshll.u16 q5, d11, #16 \n" "vmla.f32 q15, q8, q3 \n" "vshll.u16 q10, d22, #16 \n" // k33 "vmla.f32 q12, q9, q2 \n" "vshll.u16 q6, d12, #16 \n" "vmla.f32 q13, q9, q4 \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :64]! \n" "vmla.f32 q14, q10, q3 \n" "vshll.u16 q11, d23, #16 \n" // k34 "vmla.f32 q15, q10, q5 \n" "vshll.u16 q10, d16, #16 \n" // k40 "vmla.f32 q12, q11, q4 \n" "vmla.f32 q13, q11, q6 \n" "pld [%6, #256] \n" "vld1.u16 {d4-d7}, [%6 :64]! \n" // r40 r41 r42 r43 "vshll.u16 q11, d17, #16 \n" // k41 "pld [%6, #256] \n" "vld1.u16 {d10-d12}, [%6 :64] \n" // r44 r45 r46 "vshll.u16 q0, d4, #16 \n" "vshll.u16 q1, d5, #16 \n" "vshll.u16 q2, d6, #16 \n" "vshll.u16 q3, d7, #16 \n" "vmla.f32 q14, q10, q0 \n" "vshll.u16 q4, d10, #16 \n" "vmla.f32 q15, q10, q2 \n" "vshll.u16 q8, d18, #16 \n" // k42 "vmla.f32 q12, q11, q1 \n" "vshll.u16 q5, d11, #16 \n" "vmla.f32 q13, q11, q3 \n" "pld [%7, #64] \n" "vld1.u16 {d20}, [%7 :64] \n" "vmla.f32 q14, q8, q2 \n" "vshll.u16 q9, d19, #16 \n" // k43 "vmla.f32 q15, q8, q4 \n" "vshll.u16 q8, d20, #16 \n" // k44 "vmla.f32 q12, q9, q3 \n" "vshll.u16 q6, d12, #16 \n" "vmla.f32 q13, q9, q5 \n" "vmla.f32 q14, q8, q4 \n" "vmla.f32 q15, q8, q6 \n" "vadd.f32 q12, q12, q14 \n" "vadd.f32 q13, q13, q15 \n" "sub %7, %7, #192 \n" // kptr -= 24 * 4; "vshrn.u32 d24, q12, #16 \n" "vshrn.u32 d25, q13, #16 \n" "vst1.u16 {d24-d25}, [%0 :64]! 
\n" : "=r"(outptr0), // %0 "=r"(bias0_data_ptr), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(r3), // %5 "=r"(r4), // %6 "=r"(kptr) // %7 : "0"(outptr0), "1"(bias0_data_ptr), "2"(r0), "3"(r1), "4"(r2), "5"(r3), "6"(r4), "7"(kptr) : "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); #endif // __aarch64__ } for (; j < outw; j++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%1, #128] \n" "ld1 {v16.4h, v17.4h}, [%1], #16 \n" // r00 r01 "prfm pldl1keep, [%1, #192] \n" "ld1 {v18.4h, v19.4h, v20.4h}, [%1] \n" // r02 r03 r04 "shll v14.4s, %12.4h, #16 \n" "mov v31.16b, %25.16b \n" // sum00 "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "shll v18.4s, v18.4h, #16 \n" "shll v19.4s, v19.4h, #16 \n" "shll v20.4s, v20.4h, #16 \n" "shll2 v15.4s, %12.8h, #16 \n" "fmul v28.4s, v14.4s, v16.4s \n" "shll v14.4s, %13.4h, #16 \n" "fmul v29.4s, v15.4s, v17.4s \n" "shll2 v15.4s, %13.8h, #16 \n" "fmul v30.4s, v14.4s, v18.4s \n" "shll v14.4s, %14.4h, #16 \n" "fmla v31.4s, v15.4s, v19.4s \n" "shll2 v15.4s, %14.8h, #16 \n" "fmla v28.4s, v14.4s, v20.4s \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v16.4h, v17.4h}, [%2], #16 \n" // r10 r11 "prfm pldl1keep, [%2, #192] \n" "ld1 {v18.4h, v19.4h, v20.4h}, [%2] \n" // r12 r13 r14 "shll v14.4s, %15.4h, #16 \n" "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "shll v18.4s, v18.4h, #16 \n" "shll v19.4s, v19.4h, #16 \n" "shll v20.4s, v20.4h, #16 \n" "fmla v29.4s, v15.4s, v16.4s \n" "shll2 v15.4s, %15.8h, #16 \n" "fmla v30.4s, v14.4s, v17.4s \n" "shll v14.4s, %16.4h, #16 \n" "fmla v31.4s, v15.4s, v18.4s \n" "shll2 v15.4s, %16.8h, #16 \n" "fmla v28.4s, v14.4s, v19.4s \n" "shll v14.4s, %17.4h, #16 \n" "fmla v29.4s, v15.4s, v20.4s \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v16.4h, v17.4h}, [%3], #16 \n" // r20 r21 "prfm pldl1keep, [%3, #192] \n" "ld1 {v18.4h, v19.4h, v20.4h}, [%3] \n" // r22 r23 r24 "shll2 v15.4s, %17.8h, #16 \n" "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "shll v18.4s, v18.4h, #16 \n" "shll v19.4s, v19.4h, #16 \n" "shll v20.4s, v20.4h, #16 \n" "fmla v30.4s, v14.4s, v16.4s \n" "shll v14.4s, %18.4h, #16 \n" "fmla v31.4s, v15.4s, v17.4s \n" "shll2 v15.4s, %18.8h, #16 \n" "fmla v28.4s, v14.4s, v18.4s \n" "shll v14.4s, %19.4h, #16 \n" "fmla v29.4s, v15.4s, v19.4s \n" "shll2 v15.4s, %19.8h, #16 \n" "fmla v30.4s, v14.4s, v20.4s \n" "prfm pldl1keep, [%4, #128] \n" "ld1 {v16.4h, v17.4h}, [%4], #16 \n" // r30 r31 "prfm pldl1keep, [%4, #192] \n" "ld1 {v18.4h, v19.4h, v20.4h}, [%4] \n" // r32 r33 r34 "shll v14.4s, %20.4h, #16 \n" "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "shll v18.4s, v18.4h, #16 \n" "shll v19.4s, v19.4h, #16 \n" "shll v20.4s, v20.4h, #16 \n" "fmla v31.4s, v15.4s, v16.4s \n" "shll2 v15.4s, %20.8h, #16 \n" "fmla v28.4s, v14.4s, v17.4s \n" "shll v14.4s, %21.4h, #16 \n" "fmla v29.4s, v15.4s, v18.4s \n" "shll2 v15.4s, %21.8h, #16 \n" "fmla v30.4s, v14.4s, v19.4s \n" "shll v14.4s, %22.4h, #16 \n" "fmla v31.4s, v15.4s, v20.4s \n" "prfm pldl1keep, [%5, #128] \n" "ld1 {v16.4h, v17.4h}, [%5], #16 \n" // r40 r41 "prfm pldl1keep, [%5, #192] \n" "ld1 {v18.4h, v19.4h, v20.4h}, [%5] \n" // r42 r43 r44 "shll2 v15.4s, %22.8h, #16 \n" "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "shll v18.4s, v18.4h, #16 \n" "shll v19.4s, v19.4h, #16 \n" "shll v20.4s, v20.4h, #16 \n" "fmla v28.4s, v14.4s, v16.4s \n" "shll v14.4s, %23.4h, #16 \n" "fmla v29.4s, v15.4s, v17.4s \n" "shll2 v15.4s, %23.8h, #16 \n" "fmla v30.4s, v14.4s, v18.4s \n" "shll 
v14.4s, %24.4h, #16 \n" "fmla v31.4s, v15.4s, v19.4s \n" "fmla v28.4s, v14.4s, v20.4s \n" "fadd v29.4s, v29.4s, v30.4s \n" "fadd v31.4s, v31.4s, v28.4s \n" "fadd v31.4s, v31.4s, v29.4s \n" "shrn v31.4h, v31.4s, #16 \n" "st1 {v31.4h}, [%0], #8 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2), // %3 "=r"(r3), // %4 "=r"(r4) // %5 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "4"(r3), "5"(r4), "w"(_k00_01), // %12 "w"(_k02_03), // %13 "w"(_k04_10), // %14 "w"(_k11_12), // %15 "w"(_k13_14), // %16 "w"(_k20_21), // %17 "w"(_k22_23), // %18 "w"(_k24_30), // %19 "w"(_k31_32), // %20 "w"(_k33_34), // %21 "w"(_k40_41), // %22 "w"(_k42_43), // %23 "w"(_k44), // %24 "w"(_bias0) // %25 : "memory", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); #else // __aarch64__ asm volatile( "pld [%2, #128] \n" "vld1.u16 {d2-d3}, [%2 :64]! \n" // r00 r01 "pld [%2, #192] \n" "vld1.u16 {d6-d8}, [%2 :64] \n" // r02 r03 r04 "vshll.u16 q0, d2, #16 \n" "vshll.u16 q1, d3, #16 \n" "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :64]! \n" "vshll.u16 q8, d20, #16 \n" // k00 "pld [%1, #128] \n" "vld1.f32 {d24-d25}, [%1] \n" // sum0 "vshll.u16 q9, d21, #16 \n" // k01 "vmul.f32 q13, q8, q0 \n" "vshll.u16 q10, d22, #16 \n" // k02 "vmul.f32 q14, q9, q1 \n" "pld [%3, #128] \n" "vld1.u16 {d14-d15}, [%3 :64]! \n" // r10 r11 "vshll.u16 q2, d6, #16 \n" "vshll.u16 q3, d7, #16 \n" "vshll.u16 q4, d8, #16 \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :64]! \n" "vshll.u16 q11, d23, #16 \n" // k03 "vmul.f32 q15, q10, q2 \n" "vshll.u16 q10, d16, #16 \n" // k04 "vmla.f32 q12, q11, q3 \n" "vshll.u16 q11, d17, #16 \n" // k10 "vmla.f32 q13, q10, q4 \n" "pld [%3, #192] \n" "vld1.u16 {d8-d10}, [%3 :64] \n" // r12 r13 r14 "vshll.u16 q6, d14, #16 \n" "vshll.u16 q7, d15, #16 \n" "vshll.u16 q8, d18, #16 \n" // k11 "vmla.f32 q14, q11, q6 \n" "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :64]! \n" "vshll.u16 q9, d19, #16 \n" // k12 "vmla.f32 q15, q8, q7 \n" "pld [%4, #128] \n" "vld1.u16 {d2-d3}, [%4 :64]! \n" // r20 r21 "vshll.u16 q3, d8, #16 \n" "vshll.u16 q4, d9, #16 \n" "vshll.u16 q5, d10, #16 \n" "vshll.u16 q8, d20, #16 \n" // k13 "vmla.f32 q12, q9, q3 \n" "vshll.u16 q9, d21, #16 \n" // k14 "vmla.f32 q13, q8, q4 \n" "vshll.u16 q10, d22, #16 \n" // k20 "vmla.f32 q14, q9, q5 \n" "pld [%4, #192] \n" "vld1.u16 {d6-d8}, [%4 :64] \n" // r22 r23 r24 "vshll.u16 q0, d2, #16 \n" "vshll.u16 q1, d3, #16 \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :64]! \n" "vshll.u16 q11, d23, #16 \n" // k21 "vmla.f32 q15, q10, q0 \n" "vshll.u16 q10, d16, #16 \n" // k22 "vmla.f32 q12, q11, q1 \n" "pld [%5, #128] \n" "vld1.u16 {d14-d15}, [%5 :64]! \n" // r30 r31 "vshll.u16 q2, d6, #16 \n" "vshll.u16 q3, d7, #16 \n" "vshll.u16 q4, d8, #16 \n" "vshll.u16 q11, d17, #16 \n" // k23 "vmla.f32 q13, q10, q2 \n" "vshll.u16 q8, d18, #16 \n" // k24 "vmla.f32 q14, q11, q3 \n" "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :64]! \n" "vshll.u16 q9, d19, #16 \n" // k30 "vmla.f32 q15, q8, q4 \n" "pld [%5, #192] \n" "vld1.u16 {d8-d10}, [%5 :64] \n" // r32 r33 r34 "vshll.u16 q6, d14, #16 \n" "vshll.u16 q7, d15, #16 \n" "vshll.u16 q8, d20, #16 \n" // k31 "vmla.f32 q12, q9, q6 \n" "vshll.u16 q9, d21, #16 \n" // k32 "vmla.f32 q13, q8, q7 \n" "pld [%6, #128] \n" "vld1.u16 {d2-d3}, [%6 :64]! \n" // r40 r41 "vshll.u16 q3, d8, #16 \n" "vshll.u16 q4, d9, #16 \n" "vshll.u16 q5, d10, #16 \n" "vshll.u16 q10, d22, #16 \n" // k33 "vmla.f32 q14, q9, q3 \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :64]! 
\n" "vshll.u16 q11, d23, #16 \n" // k34 "vmla.f32 q15, q10, q4 \n" "vshll.u16 q10, d16, #16 \n" // k40 "vmla.f32 q12, q11, q5 \n" "pld [%6, #192] \n" "vld1.u16 {d6-d8}, [%6 :64] \n" // r42 r43 r44 "vshll.u16 q0, d2, #16 \n" "vshll.u16 q1, d3, #16 \n" "vshll.u16 q11, d17, #16 \n" // k41 "vmla.f32 q13, q10, q0 \n" "vshll.u16 q8, d18, #16 \n" // k42 "vmla.f32 q14, q11, q1 \n" "vshll.u16 q2, d6, #16 \n" "vshll.u16 q3, d7, #16 \n" "vshll.u16 q4, d8, #16 \n" "pld [%7, #64] \n" "vld1.u16 {d20}, [%7 :64] \n" "vshll.u16 q9, d19, #16 \n" // k43 "vmla.f32 q15, q8, q2 \n" "vshll.u16 q8, d20, #16 \n" // k44 "vmla.f32 q12, q9, q3 \n" "vmla.f32 q13, q8, q4 \n" "vadd.f32 q14, q14, q15 \n" "vadd.f32 q12, q12, q13 \n" "sub %7, %7, #192 \n" // kptr -= 24 * 4; "vadd.f32 q12, q12, q14 \n" "vshrn.u32 d24, q12, #16 \n" "vst1.u16 {d24}, [%0 :64]! \n" : "=r"(outptr0), // %0 "=r"(bias0_data_ptr), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(r3), // %5 "=r"(r4), // %6 "=r"(kptr) // %7 : "0"(outptr0), "1"(bias0_data_ptr), "2"(r0), "3"(r1), "4"(r2), "5"(r3), "6"(r4), "7"(kptr) : "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); #endif // __aarch64__ } r0 += tailstep; r1 += tailstep; r2 += tailstep; r3 += tailstep; r4 += tailstep; } } }
citrix_ns_fmt_plug.c
/* * Description from Nicolas Ruff: * - Salt value is hashed as an hexadecimal string, not bytes. * - The trailing NULL byte of password string is taken into account during * hashing. * - The leading '1' is actually the string length * '1' = 49 = len('1') + len(hex_salt) + len(hex_sha1) * * --------------------------------------- * import hashlib * * def netscaler_hash( rand_bytes, pwd ): * s = hashlib.sha1() * s.update( rand_bytes ) * s.update( pwd ) * return "1" + rand_bytes + s.hexdigest() * * # TEST VECTOR * # 14dfca1e6c0f5f3d96526c3ce70849992b7fad3e324cf6b0f * * rand_bytes = "4dfca1e6" * pwd = "nsroot\x00" * print netscaler_hash( rand_bytes, pwd ) * --------------------------------------- * * This software is Copyright (c) 2013 magnum, and it is hereby released to the * general public under the following terms: Redistribution and use in source * and binary forms, with or without modification, are permitted. * * This version is hard coded for salt length 8 (for speed). */ #if FMT_EXTERNS_H extern struct fmt_main fmt_ctrxns; #elif FMT_REGISTERS_H john_register_one(&fmt_ctrxns); #else #include <string.h> #ifdef _OPENMP #ifdef MMX_COEF #define OMP_SCALE 1024 #else #define OMP_SCALE 2048 #endif #include <omp.h> #endif #include "arch.h" #include "misc.h" #include "formats.h" #include "options.h" #include "johnswap.h" #ifdef MMX_COEF #define NBKEYS (MMX_COEF * SHA1_SSE_PARA) #endif #include "sse-intrinsics.h" #include "common.h" #include "sha.h" #include "memdbg.h" // Must be last included header #define FORMAT_LABEL "Citrix_NS10" #define FORMAT_NAME "Netscaler 10" #define ALGORITHM_NAME "SHA1 " SHA1_ALGORITHM_NAME #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH 0 #define PLAINTEXT_LENGTH (55 - SALT_SIZE - 1) #define BINARY_SIZE 20 #define BINARY_ALIGN 4 #define SALT_SIZE 8 #define SALT_ALIGN 4 #ifdef MMX_COEF #define MIN_KEYS_PER_CRYPT NBKEYS #define MAX_KEYS_PER_CRYPT NBKEYS #define GETPOS(i, index) ((index & (MMX_COEF - 1)) * 4 + ((i) & (0xffffffff - 3)) * MMX_COEF + (((i) & 3) ^ 3) + (index >> (MMX_COEF >> 1)) * SHA_BUF_SIZ * MMX_COEF * 4) //for endianity conversion #else #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #endif static struct fmt_tests tests[] = { {"100000000f1dc96f425971ba590a076fd0f8bccbf25c1ba0c", ""}, {"14623718525fe334bbd9c0704e06ce134ef17b51f6b33548c", " "}, {"15c5c5c5c6ccd884f6383f55a6aeba5f847775e57ab012675", "Tw"}, {"13333333319143136ba9ff9e18d1cb022b63df0926de9509e", "333"}, {"144434241d7ce89a7484cd202400639692258dde37efc29c5", "four"}, {"100010203e09cefed1847b7a2a5e7a5d2cdc67e8a56ed0bdd", "fiver"}, {"14dfca1e6c0f5f3d96526c3ce70849992b7fad3e324cf6b0f", "nsroot"}, {"1deadcafe7587ea23b25a6ccf3fd53192e36ad3e9a2553b20", "magnum!"}, {NULL} }; #ifdef MMX_COEF static unsigned char (*saved_key)[SHA_BUF_SIZ * 4 * NBKEYS]; static unsigned char (*crypt_key)[BINARY_SIZE * NBKEYS]; static int kpc; #else static char saved_salt[SALT_SIZE]; static char (*saved_plain)[PLAINTEXT_LENGTH + 1]; static ARCH_WORD_32 (*crypt_key)[BINARY_SIZE / 4]; #endif static void init(struct fmt_main *self) { #ifdef _OPENMP int omp_t; omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif #ifdef MMX_COEF saved_key = mem_calloc_tiny(sizeof(*saved_key) * self->params.max_keys_per_crypt / NBKEYS, MEM_ALIGN_SIMD); crypt_key = mem_calloc_tiny(sizeof(*crypt_key) * self->params.max_keys_per_crypt / NBKEYS, MEM_ALIGN_SIMD); kpc = self->params.max_keys_per_crypt; #else saved_plain = 
mem_calloc_tiny(sizeof(*saved_plain) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD); crypt_key = mem_calloc_tiny(sizeof(*crypt_key) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD); #endif } static void *binary(char *ciphertext) { static unsigned char *realcipher; int i, len; if (!realcipher) realcipher = mem_alloc_tiny(BINARY_SIZE, MEM_ALIGN_WORD); len = *ciphertext; ciphertext += len - 2 * BINARY_SIZE; for(i = 0; i < BINARY_SIZE; i++) { realcipher[i] = atoi16[ARCH_INDEX(ciphertext[i * 2])] * 16 + atoi16[ARCH_INDEX(ciphertext[i * 2 + 1])]; } #ifdef MMX_COEF alter_endianity(realcipher, BINARY_SIZE); #endif return (void*)realcipher; } static int valid(char *ciphertext, struct fmt_main *self) { int len; len = *ciphertext; if (len != (int)'1') return 0; if (strlen(ciphertext) != len) return 0; if (len != strspn(ciphertext, "0123456789abcdef")) return 0; return 1; } static void set_key(char *key, int index) { #ifdef MMX_COEF const ARCH_WORD_32 *wkey = (ARCH_WORD_32*)key; ARCH_WORD_32 *keybuf_word = (ARCH_WORD_32*)&saved_key[0][GETPOS(SALT_SIZE ^ 3, index)]; unsigned int len; ARCH_WORD_32 temp; len = SALT_SIZE; while((temp = *wkey++) & 0xff) { if (!(temp & 0xff00)) { *keybuf_word = JOHNSWAP((temp & 0xff) | (0x80 << 16)); len++; goto key_cleaning; } if (!(temp & 0xff0000)) { *keybuf_word = JOHNSWAP((temp & 0xffff) | (0x80 << 24)); len+=2; goto key_cleaning; } *keybuf_word = JOHNSWAP(temp); keybuf_word += MMX_COEF; if (!(temp & 0xff000000)) { *keybuf_word = 0x80000000; len+=3; goto key_cleaning; } len += 4; } *keybuf_word = 0x00800000; key_cleaning: keybuf_word += MMX_COEF; while(*keybuf_word) { *keybuf_word = 0; keybuf_word += MMX_COEF; } len += 1; /* Trailing null is included */ ((unsigned int*)saved_key)[15*MMX_COEF + (index&3) + (index>>2)*SHA_BUF_SIZ*MMX_COEF] = len << 3; #else strnzcpy(saved_plain[index], key, PLAINTEXT_LENGTH + 1); #endif } static char *get_key(int index) { #ifdef MMX_COEF unsigned int i, s; static char out[PLAINTEXT_LENGTH + 1]; s = (((unsigned int*)saved_key)[15*MMX_COEF + (index&(MMX_COEF-1)) + (index>>(MMX_COEF>>1))*SHA_BUF_SIZ*MMX_COEF] >> 3) - SALT_SIZE - 1; for(i = 0; i < s; i++) out[i] = ((char*)saved_key)[GETPOS(SALT_SIZE + i, index)]; out[i] = 0; return out; #else return saved_plain[index]; #endif } static void *salt(char *ciphertext) { static union { unsigned char c[SALT_SIZE]; ARCH_WORD_32 w; } out; ciphertext++; memcpy(out.c, ciphertext, SALT_SIZE); return (void*)out.c; } static void set_salt(void *salt) { #ifdef MMX_COEF int i, index; for (index = 0; index < kpc; index++) for (i = 0; i < SALT_SIZE; i++) saved_key[0][GETPOS(i, index)] = ((unsigned char*)salt)[i]; #else memcpy(saved_salt, salt, SALT_SIZE); #endif } static int cmp_all(void *binary, int count) { #ifdef MMX_COEF unsigned int x, y=0; for(; y<kpc / MMX_COEF; y++) for(x = 0; x < MMX_COEF; x++) { if(((ARCH_WORD_32*)binary)[0] == ((ARCH_WORD_32*)crypt_key)[x + y * MMX_COEF*5]) return 1; } return 0; #else int index = 0; #ifdef _OPENMP for (index = 0; index < count; index++) #endif if (((ARCH_WORD_32*)binary)[0] == crypt_key[index][0]) return 1; return 0; #endif } static int cmp_one(void *binary, int index) { #ifdef MMX_COEF unsigned int x, y; x = index & 3; y = index / 4; if(((ARCH_WORD_32*)binary)[0] != ((ARCH_WORD_32*)crypt_key)[x + y * MMX_COEF*5]) return 0; if(((ARCH_WORD_32*)binary)[1] != ((ARCH_WORD_32*)crypt_key)[x + y * MMX_COEF*5+MMX_COEF*1]) return 0; if(((ARCH_WORD_32*)binary)[2] != ((ARCH_WORD_32*)crypt_key)[x + y * MMX_COEF*5+MMX_COEF*2]) return 0; if(((ARCH_WORD_32*)binary)[3] != 
((ARCH_WORD_32*)crypt_key)[x + y * MMX_COEF*5+MMX_COEF*3]) return 0; if(((ARCH_WORD_32*)binary)[4] != ((ARCH_WORD_32*)crypt_key)[x + y * MMX_COEF*5+MMX_COEF*4]) return 0; return 1; #else return !memcmp(binary, crypt_key[index], BINARY_SIZE); #endif } static int cmp_exact(char *source, int index) { return (1); } static int crypt_all(int *pcount, struct db_salt *salt) { int count = *pcount; int index = 0; #ifdef _OPENMP int loops = (count + MAX_KEYS_PER_CRYPT - 1) / MAX_KEYS_PER_CRYPT; #pragma omp parallel for for (index = 0; index < loops; ++index) #endif { #ifdef MMX_COEF SSESHA1body(saved_key[index], (unsigned int*)crypt_key[index], NULL, SSEi_MIXED_IN); #else SHA_CTX ctx; SHA1_Init(&ctx); SHA1_Update(&ctx, (unsigned char*)saved_salt, SALT_SIZE); SHA1_Update(&ctx, (unsigned char*)saved_plain[index], strlen(saved_plain[index]) + 1); SHA1_Final((unsigned char*)crypt_key[index], &ctx); #endif } return count; } #ifdef MMX_COEF #define HASH_IDX ((index&3)+(index/4)*MMX_COEF*5) static int get_hash_0(int index) { return ((ARCH_WORD_32*)crypt_key)[HASH_IDX] & 0xf; } static int get_hash_1(int index) { return ((ARCH_WORD_32*)crypt_key)[HASH_IDX] & 0xff; } static int get_hash_2(int index) { return ((ARCH_WORD_32*)crypt_key)[HASH_IDX] & 0xfff; } static int get_hash_3(int index) { return ((ARCH_WORD_32*)crypt_key)[HASH_IDX] & 0xffff; } static int get_hash_4(int index) { return ((ARCH_WORD_32*)crypt_key)[HASH_IDX] & 0xfffff; } static int get_hash_5(int index) { return ((ARCH_WORD_32*)crypt_key)[HASH_IDX] & 0xffffff; } static int get_hash_6(int index) { return ((ARCH_WORD_32*)crypt_key)[HASH_IDX] & 0x7ffffff; } #else static int get_hash_0(int index) { return crypt_key[index][0] & 0xf; } static int get_hash_1(int index) { return crypt_key[index][0] & 0xff; } static int get_hash_2(int index) { return crypt_key[index][0] & 0xfff; } static int get_hash_3(int index) { return crypt_key[index][0] & 0xffff; } static int get_hash_4(int index) { return crypt_key[index][0] & 0xfffff; } static int get_hash_5(int index) { return crypt_key[index][0] & 0xffffff; } static int get_hash_6(int index) { return crypt_key[index][0] & 0x7ffffff; } #endif static int salt_hash(void *salt) { return *(ARCH_WORD_32*)salt & (SALT_HASH_SIZE - 1); } struct fmt_main fmt_ctrxns = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP, #if FMT_MAIN_VERSION > 11 { NULL }, #endif tests }, { init, fmt_default_done, fmt_default_reset, fmt_default_prepare, valid, fmt_default_split, binary, salt, #if FMT_MAIN_VERSION > 11 { NULL }, #endif fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, salt_hash, set_salt, set_key, get_key, fmt_default_clear_keys, crypt_all, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
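/*
 * Standalone cross-check (not part of the plugin) of the Netscaler hash
 * described in the header comment above, mirroring the Python reference.
 * A sketch assuming OpenSSL's SHA1 and the fixed 8-character hex salt this
 * format uses; build with -lcrypto.
 */
#include <stdio.h>
#include <string.h>
#include <openssl/sha.h>

static void netscaler_hash(const char salt8[8], const char *pwd, char out[50])
{
    unsigned char md[SHA_DIGEST_LENGTH];
    SHA_CTX ctx;
    int n, i;
    SHA1_Init(&ctx);
    SHA1_Update(&ctx, salt8, 8);              /* salt is hashed as a hex string, not bytes */
    SHA1_Update(&ctx, pwd, strlen(pwd) + 1);  /* trailing NUL byte is hashed too */
    SHA1_Final(md, &ctx);
    n = sprintf(out, "1%.8s", salt8);         /* leading '1' == total length 49 */
    for (i = 0; i < SHA_DIGEST_LENGTH; i++)
        n += sprintf(out + n, "%02x", md[i]);
}

int main(void)
{
    char buf[50];
    netscaler_hash("4dfca1e6", "nsroot", buf);
    /* expected: 14dfca1e6c0f5f3d96526c3ce70849992b7fad3e324cf6b0f */
    puts(buf);
    return 0;
}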
ofmo-oneint-core.c
/**
 * @file ofmo-oneint-core.c
 * Functions that compute one-electron integrals for a single
 * contracted-shell (CS) pair.
 * */
/**
 * @defgroup core-oneint Functions that perform one-electron integrals
 *
 * Functions that compute one-electron integrals for a single CS pair.
 * As of 2011/06/16, integrals from (s,s) up to (d,d) are supported.
 * Because these routines are fine-grained, all arguments are passed by
 * reference so that C and Fortran can share a common API.
 *
 * The integral routines \c oneint_core_xx_ all take the same arguments,
 * which are described here.
 *
 * @param[in] *ips0 number of the first primitive shell (PS) belonging to
 *     the first CS
 * @param[in] *nps_i contraction length (number of PSs) of the first CS
 * @param[in] A[3] center coordinates of the first CS (in au)
 * @param[in] *jps0 number of the first PS belonging to the second CS
 * @param[in] *nps_j contraction length (number of PSs) of the second CS
 * @param[in] B[3] center coordinates of the second CS (in au)
 * @param[in] prim_exp[ips] orbital exponent of the PS with PS number \c ips
 * @param[in] prim_coe[ips] contraction coefficient (including the
 *     normalization constant) of the PS with PS number \c ips
 * @param[in] *nat number of atoms
 * @param[in] atom_x[iat] x coordinate of atom number \c iat
 * @param[in] atom_y[iat] y coordinate of atom number \c iat
 * @param[in] atom_z[iat] z coordinate of atom number \c iat
 * @param[in] atomic_number[iat] atomic number of atom number \c iat
 *
 * @param[out] OVI[] array receiving the computed overlap integrals
 * @param[out] HCORE[] array receiving the computed one-electron Hamiltonian
 *     matrix elements (kinetic-energy integral + nuclear-attraction integral)
 *
 * @ingroup integ-core
 * */
#include <stdio.h>
#include <math.h>
#include "fmt.h"
#include "ofmo-def.h"

#define ZERO 0.e0
#define HALF .5e0
#define EPS_PS_PAIR 1.e-32

static double EPS_PS_PAIR_NAI;
static double _PI_32_;
static double _2PI_;
static double _inv2_;
static double _inv3_;
static double _spi2_;

/** Initializes the one-electron integral routines
 *
 * Sets up the constants needed for one-electron integral evaluation
 * @ingroup core-oneint
 * */
int ofmo_oneint_init( ) {
    double pi;
    static int called = false;
    if ( called ) return 0;
    pi = 4.e0 * atan( 1.e0 );
    _PI_32_ = pi * sqrt(pi);
    _2PI_ = 2.e0 * pi;
    _inv2_ = 1.e0/2.e0;
    _inv3_ = 1.e0/3.e0;
    _spi2_ = sqrt( 0.5e0*pi );
    EPS_PS_PAIR_NAI = EPS_PS_PAIR;
    called = true;
    return 0;
}

int ofmo_oneint_set_sum_atomic_numbers( const int sum_atomic_number ) {
    if ( sum_atomic_number < 1 ) {
        EPS_PS_PAIR_NAI = EPS_PS_PAIR;
    } else {
        EPS_PS_PAIR_NAI = EPS_PS_PAIR / (double)sum_atomic_number;
    }
    return 0;
}

extern double *FMT_fmt_table0;
extern double *FMT_fmt_table1;
extern double *FMT_fmt_table2;
extern double *FMT_fmt_table3;
extern double *FMT_fmt_table4;
extern double FMT_fmt_step_size;
extern double FMT_fmt_inv_step_size;
extern double FMT_pi_div2;

/** Computes the SS-type one-electron integrals for a single CS pair
 * @ingroup core-oneint
 * */
void oneint_core_ss__(
        const int *ips0, const int *nps_i, const double A[3],
        const int *jps0, const int *nps_j, const double B[3],
        const double prim_exp[], const double prim_coe[],
        const int *nat, const double atom_x[], const double atom_y[],
        const double atom_z[], const int atomic_number[],
        double OVI[], double HCORE[] ) {
    int i, ips, jps, kat, ips1, jps1;
    double zeta_a, zeta_b, zeta, coef_a, coef_b, coef;
    double sqrzi, zi, xiza, xi, xiab2, exp_ab, css;
    double oviss, keiss, naiss;
    double sum, PC2, dq, U;
    double BA[3], P[3], AB2;
    double _pi32_ = _PI_32_, _2pi_ = _2PI_;
    OVI[0] = ZERO;
    HCORE[0] = ZERO;
    AB2 = ZERO;
    for ( i=0; i<3; i++ ) {
        BA[i] = B[i]-A[i];
        AB2 += BA[i]*BA[i];
    }
    ips1 = *ips0 + (*nps_i);
    jps1 = *jps0 + (*nps_j);
    /*// debug
    //#pragma omp master
    {
        printf("FMT_fmt_table0 = %p\n", FMT_fmt_table0 );
        printf("ips1, jps1 = %d, %d\n", ips1, jps1 );
        fflush(stdout);
    }*/
    for ( ips=(*ips0); ips<ips1; ips++ ) {
        zeta_a = prim_exp[ips];
        coef_a = prim_coe[ips];
        for ( jps=(*jps0); jps<jps1; jps++ ) {
            zeta_b = prim_exp[jps];
            coef_b = prim_coe[jps];
            zeta = zeta_a + zeta_b;
            sqrzi = sqrt( 1.e0 / zeta );
            zi = sqrzi * sqrzi;
            coef = coef_a * coef_b;
            xiza = zeta_b * zi;
            xi = xiza * zeta_a;
            xiab2 = xi * AB2;
            exp_ab = coef * exp( -xiab2 );
            oviss = _pi32_ * zi * sqrzi * exp_ab;
            keiss = xi * ( 3.e0 - 2.e0 * xiab2 ) * oviss;
            OVI[0] += oviss;
            HCORE[0] += keiss;
            css = _2pi_ * zi * exp_ab;
            if (
fabs(css) < EPS_PS_PAIR_NAI ) continue; for ( i=0; i<3; i++ ) P[i] = xiza*BA[i] + A[i]; sum = ZERO; for ( kat=0; kat<(*nat); kat++ ) { dq = (double)atomic_number[kat]; PC2 = (P[0]-atom_x[kat])*(P[0]-atom_x[kat]); PC2 += (P[1]-atom_y[kat])*(P[1]-atom_y[kat]); PC2 += (P[2]-atom_z[kat])*(P[2]-atom_z[kat]); U = zeta * PC2; { int it0, pos; double dT; if ( U < 36e0 ) { it0 = (int)(0.5e0 + U * FMT_fmt_inv_step_size); dT = it0 * FMT_fmt_step_size - U; pos = it0 * 4; naiss = css * ((( FMT_fmt_table0[pos+3] * dT + FMT_fmt_table0[pos+2] ) * dT + FMT_fmt_table0[pos+1] ) * dT + FMT_fmt_table0[pos+0] ); } else { naiss = css * _spi2_ * sqrt( 1.e0/(U+U) ); } } //fmt( &naiss, 0, U, css ); sum += (dq*naiss); } HCORE[0] -= sum; } } } /** 1つのCS対のPSタイプの1電子積分を計算する * @ingroup core-oneint * */ void oneint_core_ps__( const int *ips0, const int *nps_i, const double A[3], const int *jps0, const int *nps_j, const double B[3], const double prim_exp[], const double prim_coe[], const int *nat, const double atom_x[], const double atom_y[], const double atom_z[], const int atomic_number[], double OVI[], double HCORE[] ) { int i, ips, jps, kat, ips1, jps1; double zeta_a, zeta_b, zeta, coef_a, coef_b, coef; double sqrzi, zi, xiza, xi, xiab2, xi2, exp_ab, css, cnass; double oviss, keiss; double PC2, dq, U; double BA[3], P[3], PA[3], PC[3], AB2; double keips[3], ovips[3], naips[3]; double naiss[1+1]; double _pi32_ = _PI_32_, _2pi_ = _2PI_; for ( i=0; i<3; i++ ) OVI[i] = HCORE[i] = ZERO; AB2 = ZERO; for ( i=0; i<3; i++ ) { BA[i] = B[i]-A[i]; AB2 += BA[i]*BA[i]; } ips1 = *ips0 + (*nps_i); jps1 = *jps0 + (*nps_j); for ( ips=(*ips0); ips<ips1; ips++ ) { zeta_a = prim_exp[ips]; coef_a = prim_coe[ips]; for ( jps=(*jps0); jps<jps1; jps++ ) { zeta_b = prim_exp[jps]; coef_b = prim_coe[jps]; zeta = zeta_a + zeta_b; sqrzi = sqrt( 1.e0 / zeta ); zi = sqrzi * sqrzi; coef = coef_a * coef_b; xiza = zeta_b * zi; xi = xiza * zeta_a; xiab2 = xi * AB2; xi2 = 2.e0 * xi; exp_ab = coef * exp( -xiab2 ); oviss = _pi32_ * zi * sqrzi * exp_ab; keiss = xi * ( 3.e0 - 2.e0 * xiab2 ) * oviss; css = _2pi_ * zi * exp_ab; if ( fabs(css) < EPS_PS_PAIR_NAI ) continue; for ( i=0; i<3; i++ ) { P[i] = xiza*BA[i] + A[i]; PA[i] = xiza*BA[i]; } // overlap integral for ( i=0; i<3; i++ ) ovips[i] = PA[i]*oviss; // kinetic energy integral for ( i=0; i<3; i++ ) keips[i] = PA[i]*keiss + xi2*ovips[i]; for ( i=0; i<3; i++ ) naips[i] = ZERO; for ( kat=0; kat<(*nat); kat++ ) { dq = (double)atomic_number[kat]; PC[0] = P[0]-atom_x[kat]; PC[1] = P[1]-atom_y[kat]; PC[2] = P[2]-atom_z[kat]; PC2 = PC[0]*PC[0] + PC[1]*PC[1] + PC[2]*PC[2]; cnass = dq * css; U = zeta * PC2; { int it0, pos; double dT, t_inv, st_inv, dT2, dT3; if ( U < 38e0 ) { it0 = (int)(0.5e0 + U * FMT_fmt_inv_step_size); dT = it0 * FMT_fmt_step_size - U; dT2 = dT * _inv2_; dT3 = dT * _inv3_; pos = it0 * (1+4); naiss[0] = cnass*(((FMT_fmt_table1[pos+3] * dT3 + FMT_fmt_table1[pos+2] ) * dT2 + FMT_fmt_table1[pos+1] ) * dT + FMT_fmt_table1[pos+0] ); naiss[1] = cnass*(((FMT_fmt_table1[pos+4] * dT3 + FMT_fmt_table1[pos+3] ) * dT2 + FMT_fmt_table1[pos+2] ) * dT + FMT_fmt_table1[pos+1] ); } else { st_inv = sqrt( 0.5e0 / U ); t_inv = st_inv * st_inv; naiss[0] = cnass * _spi2_ * st_inv; naiss[1] = t_inv * naiss[0]; } } //fmt( naiss, 1, U, cnass ); for ( i=0; i<3; i++ ) naips[i] += PA[i]*naiss[0]-PC[i]*naiss[1]; } // contraction for ( i=0; i<3; i++ ) { OVI[i] += ovips[i]; HCORE[i] += (keips[i] - naips[i]); } } } } /** 1つのCS対のPPタイプの1電子積分を計算する * @ingroup core-oneint * */ void oneint_core_pp__( const int *ips0, const int *nps_i, 
const double A[3], const int *jps0, const int *nps_j, const double B[3], const double prim_exp[], const double prim_coe[], const int *nat, const double atom_x[], const double atom_y[], const double atom_z[], const int atomic_number[], double OVI[], double HCORE[] ) { int i, m, ips, jps, kat, ips1, jps1; double zeta_a, zeta_b, zeta, coef_a, coef_b, coef; double sqrzi, zi, xiza, xi, xiab2, xi2, exp_ab, css, zeta2; double oviss, keiss, tmp; double BA[3], P[3], PA[3], PC[3], PB[3], AB2; double keips[3], keipp[3*3], ovips[3], ovipp[3*3]; double PC2, dq, U; double naiss[2+1], naips[2][3], naipp[3*3]; double _pi32_ = _PI_32_, _2pi_ = _2PI_; for ( i=0; i<3*3; i++ ) OVI[i] = HCORE[i] = ZERO; AB2 = ZERO; for ( i=0; i<3; i++ ) { BA[i] = B[i]-A[i]; AB2 += BA[i]*BA[i]; } ips1 = *ips0 + (*nps_i); jps1 = *jps0 + (*nps_j); for ( ips=(*ips0); ips<ips1; ips++ ) { zeta_a = prim_exp[ips]; coef_a = prim_coe[ips]; for ( jps=(*jps0); jps<jps1; jps++ ) { zeta_b = prim_exp[jps]; coef_b = prim_coe[jps]; zeta = zeta_a + zeta_b; sqrzi = sqrt( 1.e0 / zeta ); zi = sqrzi * sqrzi; coef = coef_a * coef_b; xiza = zeta_b * zi; xi = xiza * zeta_a; xiab2 = xi * AB2; xi2 = 2.e0 * xi; zeta2 = HALF * zi; exp_ab = coef * exp( -xiab2 ); oviss = _pi32_ * zi * sqrzi * exp_ab; keiss = xi * ( 3.e0 - 2.e0 * xiab2 ) * oviss; css = _2pi_ * zi * exp_ab; if ( fabs(css) < EPS_PS_PAIR_NAI ) continue; for ( i=0; i<3; i++ ) { P[i] = xiza*BA[i] + A[i]; PA[i] = xiza*BA[i]; PB[i] = xiza*BA[i] - BA[i]; } // overlap integral // (p|s) for ( i=0; i<3; i++ ) ovips[i] = PA[i]*oviss; // (p|p) ovipp[0*3+0] = PB[0]*ovips[0] + zeta2*oviss; ovipp[0*3+1] = PB[1]*ovips[0]; ovipp[0*3+2] = PB[2]*ovips[0]; ovipp[1*3+0] = PB[0]*ovips[1]; ovipp[1*3+1] = PB[1]*ovips[1] + zeta2*oviss; ovipp[1*3+2] = PB[2]*ovips[1]; ovipp[2*3+0] = PB[0]*ovips[2]; ovipp[2*3+1] = PB[1]*ovips[2]; ovipp[2*3+2] = PB[2]*ovips[2] + zeta2*oviss; // kinetic energy integral // (p|T|s) for ( i=0; i<3; i++ ) keips[i] = PA[i]*keiss + xi2*ovips[i]; // (p|T|p) keipp[0*3+0] = PB[0]*keips[0] + xi2*ovipp[0*3+0] + zeta2*keiss; keipp[0*3+1] = PB[1]*keips[0] + xi2*ovipp[0*3+1]; keipp[0*3+2] = PB[2]*keips[0] + xi2*ovipp[0*3+2]; keipp[1*3+0] = PB[0]*keips[1] + xi2*ovipp[1*3+0]; keipp[1*3+1] = PB[1]*keips[1] + xi2*ovipp[1*3+1] + zeta2*keiss; keipp[1*3+2] = PB[2]*keips[1] + xi2*ovipp[1*3+2]; keipp[2*3+0] = PB[0]*keips[2] + xi2*ovipp[2*3+0]; keipp[2*3+1] = PB[1]*keips[2] + xi2*ovipp[2*3+1]; keipp[2*3+2] = PB[2]*keips[2] + xi2*ovipp[2*3+2] + zeta2*keiss; for ( i=0; i<3*3; i++ ) naipp[i] = ZERO; for ( kat=0; kat<(*nat); kat++ ) { dq = (double)atomic_number[kat]; PC[0] = P[0]-atom_x[kat]; PC[1] = P[1]-atom_y[kat]; PC[2] = P[2]-atom_z[kat]; PC2 = PC[0]*PC[0] + PC[1]*PC[1] + PC[2]*PC[2]; U = zeta * PC2; { int it0, pos; double dT, t_inv, st_inv, dT2, dT3, cnass; cnass = dq*css; if ( U < 40e0 ) { it0 = (int)(0.5e0 + U * FMT_fmt_inv_step_size); dT = it0 * FMT_fmt_step_size - U; dT2 = dT * _inv2_; dT3 = dT * _inv3_; pos = it0 * (2+4); naiss[0] = cnass*(((FMT_fmt_table2[pos+3] * dT3 + FMT_fmt_table2[pos+2] ) * dT2 + FMT_fmt_table2[pos+1] ) * dT + FMT_fmt_table2[pos+0] ); naiss[1] = cnass*(((FMT_fmt_table2[pos+4] * dT3 + FMT_fmt_table2[pos+3] ) * dT2 + FMT_fmt_table2[pos+2] ) * dT + FMT_fmt_table2[pos+1] ); naiss[2] = cnass*(((FMT_fmt_table2[pos+5] * dT3 + FMT_fmt_table2[pos+4] ) * dT2 + FMT_fmt_table2[pos+3] ) * dT + FMT_fmt_table2[pos+2] ); } else { st_inv = sqrt( 0.5e0 / U ); t_inv = st_inv * st_inv; naiss[0] = cnass * _spi2_ * st_inv; naiss[1] = t_inv * naiss[0]; naiss[2] = 3.e0 * t_inv * naiss[1]; } } //fmt( naiss, 
2, U, dq*css ); // -- (P|A(0)|S) -- for ( m=0; m<=1; m++) { naips[m][0] = PA[0]*naiss[m] - PC[0]*naiss[m+1]; naips[m][1] = PA[1]*naiss[m] - PC[1]*naiss[m+1]; naips[m][2] = PA[2]*naiss[m] - PC[2]*naiss[m+1]; } // -- (P|A|P) -- tmp = zeta2*( naiss[0] - naiss[1] ); naipp[0*3+0] += PB[0]*naips[0][0]-PC[0]*naips[1][0]+tmp; naipp[0*3+1] += PB[1]*naips[0][0]-PC[1]*naips[1][0]; naipp[0*3+2] += PB[2]*naips[0][0]-PC[2]*naips[1][0]; naipp[1*3+0] += PB[0]*naips[0][1]-PC[0]*naips[1][1]; naipp[1*3+1] += PB[1]*naips[0][1]-PC[1]*naips[1][1]+tmp; naipp[1*3+2] += PB[2]*naips[0][1]-PC[2]*naips[1][1]; naipp[2*3+0] += PB[0]*naips[0][2]-PC[0]*naips[1][2]; naipp[2*3+1] += PB[1]*naips[0][2]-PC[1]*naips[1][2]; naipp[2*3+2] += PB[2]*naips[0][2]-PC[2]*naips[1][2]+tmp; } // contraction for ( i=0; i<3*3; i++ ) { OVI[i] += ovipp[i]; HCORE[i] += (keipp[i] - naipp[i]); } } // jps } // ips } /** 1つのCS対のDSタイプの1電子積分を計算する * @ingroup core-oneint * */ void oneint_core_ds__( const int *ips0, const int *nps_i, const double A[3], const int *jps0, const int *nps_j, const double B[3], const double prim_exp[], const double prim_coe[], const int *nat, const double atom_x[], const double atom_y[], const double atom_z[], const int atomic_number[], double OVI[], double HCORE[] ) { int i, m, ips, jps, kat, ips1, jps1; double zeta_a, zeta_b, zeta, coef_a, coef_b, coef; double sqrzi, zi, xiza, xi, xiab2, xi2, exp_ab, css, zeta2; double oviss, keiss, tmp; double BA[3], P[3], PA[3], PC[3], AB2; double keips[3], keids[6], ovips[3], ovids[6]; double PC2, dq, U; double naiss[2+1], naips[2][3], naids[6]; double _pi32_ = _PI_32_, _2pi_ = _2PI_; double sqr3; sqr3 = sqrt(3.e0); for ( i=0; i<6; i++ ) OVI[i] = HCORE[i] = ZERO; AB2 = ZERO; for ( i=0; i<3; i++ ) { BA[i] = B[i]-A[i]; AB2 += BA[i]*BA[i]; } ips1 = *ips0 + (*nps_i); jps1 = *jps0 + (*nps_j); for ( ips=(*ips0); ips<ips1; ips++ ) { zeta_a = prim_exp[ips]; coef_a = prim_coe[ips]; for ( jps=(*jps0); jps<jps1; jps++ ) { zeta_b = prim_exp[jps]; coef_b = prim_coe[jps]; zeta = zeta_a + zeta_b; sqrzi = sqrt( 1.e0 / zeta ); zi = sqrzi * sqrzi; coef = coef_a * coef_b; xiza = zeta_b * zi; xi = xiza * zeta_a; xiab2 = xi * AB2; xi2 = 2.e0 * xi; zeta2 = HALF * zi; exp_ab = coef * exp( -xiab2 ); oviss = _pi32_ * zi * sqrzi * exp_ab; keiss = xi * ( 3.e0 - 2.e0 * xiab2 ) * oviss; css = _2pi_ * zi * exp_ab; if ( fabs(css) < EPS_PS_PAIR_NAI ) continue; for ( i=0; i<3; i++ ) { P[i] = xiza*BA[i] + A[i]; PA[i] = xiza*BA[i]; } // overlap integral // -- (P||S) -- ovips[0] = PA[0]*oviss; ovips[1] = PA[1]*oviss; ovips[2] = PA[2]*oviss; // -- (D||S) -- ovids[0] = PA[0]*ovips[0] + zeta2*oviss; ovids[1] = PA[1]*ovips[1] + zeta2*oviss; ovids[2] = PA[2]*ovips[2] + zeta2*oviss; ovids[3] = PA[0]*ovips[1]; ovids[4] = PA[1]*ovips[2]; ovids[5] = PA[2]*ovips[0]; // kinetic energy integral // -- (P|T|S) -- keips[0] = PA[0]*keiss + xi2*ovips[0]; keips[1] = PA[1]*keiss + xi2*ovips[1]; keips[2] = PA[2]*keiss + xi2*ovips[2]; // -- (D|T|S) -- tmp = zeta2*keiss - xiza*oviss; keids[0] = PA[0]*keips[0] + xi2*ovids[0] + tmp; keids[1] = PA[1]*keips[1] + xi2*ovids[1] + tmp; keids[2] = PA[2]*keips[2] + xi2*ovids[2] + tmp; keids[3] = PA[0]*keips[1] + xi2*ovids[3]; keids[4] = PA[1]*keips[2] + xi2*ovids[4]; keids[5] = PA[2]*keips[0] + xi2*ovids[5]; for ( i=0; i<6; i++ ) naids[i] = ZERO; for ( kat=0; kat<(*nat); kat++ ) { dq = (double)atomic_number[kat]; PC[0] = P[0]-atom_x[kat]; PC[1] = P[1]-atom_y[kat]; PC[2] = P[2]-atom_z[kat]; PC2 = PC[0]*PC[0] + PC[1]*PC[1] + PC[2]*PC[2]; U = zeta * PC2; { int it0, pos; double dT, t_inv, st_inv, dT2, dT3, 
cnass; cnass = dq*css; if ( U < 40e0 ) { it0 = (int)(0.5e0 + U * FMT_fmt_inv_step_size); dT = it0 * FMT_fmt_step_size - U; dT2 = dT * _inv2_; dT3 = dT * _inv3_; pos = it0 * (2+4); naiss[0] = cnass*(((FMT_fmt_table2[pos+3] * dT3 + FMT_fmt_table2[pos+2] ) * dT2 + FMT_fmt_table2[pos+1] ) * dT + FMT_fmt_table2[pos+0] ); naiss[1] = cnass*(((FMT_fmt_table2[pos+4] * dT3 + FMT_fmt_table2[pos+3] ) * dT2 + FMT_fmt_table2[pos+2] ) * dT + FMT_fmt_table2[pos+1] ); naiss[2] = cnass*(((FMT_fmt_table2[pos+5] * dT3 + FMT_fmt_table2[pos+4] ) * dT2 + FMT_fmt_table2[pos+3] ) * dT + FMT_fmt_table2[pos+2] ); } else { st_inv = sqrt( 0.5e0 / U ); t_inv = st_inv * st_inv; naiss[0] = cnass * _spi2_ * st_inv; naiss[1] = t_inv * naiss[0]; naiss[2] = 3.e0 * t_inv * naiss[1]; } } //fmt( naiss, 2, U, dq*css ); // -- (P|A(0)|S) -- for ( m=0; m<=1; m++) { naips[m][0] = PA[0]*naiss[m] - PC[0]*naiss[m+1]; naips[m][1] = PA[1]*naiss[m] - PC[1]*naiss[m+1]; naips[m][2] = PA[2]*naiss[m] - PC[2]*naiss[m+1]; } // -- (D|A|S) -- tmp = zeta2*( naiss[0] - naiss[1] ); naids[0] += PA[0]*naips[0][0] - PC[0]*naips[1][0] + tmp; naids[1] += PA[1]*naips[0][1] - PC[1]*naips[1][1] + tmp; naids[2] += PA[2]*naips[0][2] - PC[2]*naips[1][2] + tmp; naids[3] += PA[0]*naips[0][1] - PC[0]*naips[1][1]; naids[4] += PA[1]*naips[0][2] - PC[1]*naips[1][2]; naids[5] += PA[2]*naips[0][0] - PC[2]*naips[1][0]; } // contraction for ( i=0; i<6; i++ ) { OVI[i] += ovids[i]; HCORE[i] += (keids[i] - naids[i]); } } // jps } // ips for ( i=3; i<6; i++ ) { OVI[i] *= sqr3; HCORE[i] *= sqr3; } } /** 1つのCS対のDPタイプの1電子積分を計算する * @ingroup core-oneint * */ void oneint_core_dp__( const int *ips0, const int *nps_i, const double A[3], const int *jps0, const int *nps_j, const double B[3], const double prim_exp[], const double prim_coe[], const int *nat, const double atom_x[], const double atom_y[], const double atom_z[], const int atomic_number[], double OVI[], double HCORE[] ) { int i, j, ij, m, ips, jps, kat, ips1, jps1; double zeta_a, zeta_b, zeta, coef_a, coef_b, coef; double sqrzi, zi, xiza, xi, xiab2, xi2, exp_ab, css, zeta2; double oviss, keiss, tmp, tmpps[3]; double BA[3], P[3], PA[3], PC[3], PB[3], AB2; double keips[3], keids[6], keidp[6*3]; double ovips[3], ovids[6], ovidp[6*3]; double PC2, dq, U; double naiss[3+1], naips[2+1][3], naids[1+1][6], naidp[6*3]; double _pi32_ = _PI_32_, _2pi_ = _2PI_; double sqr3; sqr3 = sqrt(3.e0); for ( i=0; i<6*3; i++ ) OVI[i] = HCORE[i] = ZERO; AB2 = ZERO; for ( i=0; i<3; i++ ) { BA[i] = B[i]-A[i]; AB2 += BA[i]*BA[i]; } ips1 = *ips0 + (*nps_i); jps1 = *jps0 + (*nps_j); for ( ips=(*ips0); ips<ips1; ips++ ) { zeta_a = prim_exp[ips]; coef_a = prim_coe[ips]; for ( jps=(*jps0); jps<jps1; jps++ ) { zeta_b = prim_exp[jps]; coef_b = prim_coe[jps]; zeta = zeta_a + zeta_b; sqrzi = sqrt( 1.e0 / zeta ); zi = sqrzi * sqrzi; coef = coef_a * coef_b; xiza = zeta_b * zi; xi = xiza * zeta_a; xiab2 = xi * AB2; xi2 = 2.e0 * xi; zeta2 = HALF * zi; exp_ab = coef * exp( -xiab2 ); oviss = _pi32_ * zi * sqrzi * exp_ab; keiss = xi * ( 3.e0 - 2.e0 * xiab2 ) * oviss; css = _2pi_ * zi * exp_ab; if ( fabs(css) < EPS_PS_PAIR_NAI ) continue; for ( i=0; i<3; i++ ) { P[i] = xiza*BA[i] + A[i]; PA[i] = xiza*BA[i]; PB[i] = xiza*BA[i] - BA[i]; } // overlap integral // -- (P||S) -- ovips[0] = PA[0]*oviss; ovips[1] = PA[1]*oviss; ovips[2] = PA[2]*oviss; // -- (D||S) -- ovids[0] = PA[0]*ovips[0] + zeta2*oviss; ovids[1] = PA[1]*ovips[1] + zeta2*oviss; ovids[2] = PA[2]*ovips[2] + zeta2*oviss; ovids[3] = PA[0]*ovips[1]; ovids[4] = PA[1]*ovips[2]; ovids[5] = PA[2]*ovips[0]; // -- 
(D||P) -- ovidp[0*3+0] = PB[0]*ovids[0] + zi *ovips[0]; ovidp[0*3+1] = PB[1]*ovids[0]; ovidp[0*3+2] = PB[2]*ovids[0]; ovidp[1*3+0] = PB[0]*ovids[1]; ovidp[1*3+1] = PB[1]*ovids[1] + zi *ovips[1]; ovidp[1*3+2] = PB[2]*ovids[1]; ovidp[2*3+0] = PB[0]*ovids[2]; ovidp[2*3+1] = PB[1]*ovids[2]; ovidp[2*3+2] = PB[2]*ovids[2] + zi *ovips[2]; ovidp[3*3+0] = PB[0]*ovids[3] + zeta2*ovips[1]; ovidp[3*3+1] = PB[1]*ovids[3] + zeta2*ovips[0]; ovidp[3*3+2] = PB[2]*ovids[3]; ovidp[4*3+0] = PB[0]*ovids[4]; ovidp[4*3+1] = PB[1]*ovids[4] + zeta2*ovips[2]; ovidp[4*3+2] = PB[2]*ovids[4] + zeta2*ovips[1]; ovidp[5*3+0] = PB[0]*ovids[5] + zeta2*ovips[2]; ovidp[5*3+1] = PB[1]*ovids[5]; ovidp[5*3+2] = PB[2]*ovids[5] + zeta2*ovips[0]; // kinetic energy integral // -- (P|T|S) -- keips[0] = PA[0]*keiss + xi2*ovips[0]; keips[1] = PA[1]*keiss + xi2*ovips[1]; keips[2] = PA[2]*keiss + xi2*ovips[2]; // -- (D|T|S) -- tmp = zeta2*keiss - xiza*oviss; keids[0] = PA[0]*keips[0] + xi2*ovids[0] + tmp; keids[1] = PA[1]*keips[1] + xi2*ovids[1] + tmp; keids[2] = PA[2]*keips[2] + xi2*ovids[2] + tmp; keids[3] = PA[0]*keips[1] + xi2*ovids[3]; keids[4] = PA[1]*keips[2] + xi2*ovids[4]; keids[5] = PA[2]*keips[0] + xi2*ovids[5]; // -- (D|T|P) -- keidp[0*3+0] = PB[0]*keids[0] + xi2*ovidp[0*3+0] + zi*keips[0]; keidp[0*3+1] = PB[1]*keids[0] + xi2*ovidp[0*3+1]; keidp[0*3+2] = PB[2]*keids[0] + xi2*ovidp[0*3+2]; keidp[1*3+0] = PB[0]*keids[1] + xi2*ovidp[1*3+0]; keidp[1*3+1] = PB[1]*keids[1] + xi2*ovidp[1*3+1] + zi*keips[1]; keidp[1*3+2] = PB[2]*keids[1] + xi2*ovidp[1*3+2]; keidp[2*3+0] = PB[0]*keids[2] + xi2*ovidp[2*3+0]; keidp[2*3+1] = PB[1]*keids[2] + xi2*ovidp[2*3+1]; keidp[2*3+2] = PB[2]*keids[2] + xi2*ovidp[2*3+2] + zi*keips[2]; keidp[3*3+0] = PB[0]*keids[3] + xi2*ovidp[3*3+0] + zeta2*keips[1]; keidp[3*3+1] = PB[1]*keids[3] + xi2*ovidp[3*3+1] + zeta2*keips[0]; keidp[3*3+2] = PB[2]*keids[3] + xi2*ovidp[3*3+2]; keidp[4*3+0] = PB[0]*keids[4] + xi2*ovidp[4*3+0]; keidp[4*3+1] = PB[1]*keids[4] + xi2*ovidp[4*3+1] + zeta2*keips[2]; keidp[4*3+2] = PB[2]*keids[4] + xi2*ovidp[4*3+2] + zeta2*keips[1]; keidp[5*3+0] = PB[0]*keids[5] + xi2*ovidp[5*3+0] + zeta2*keips[2]; keidp[5*3+1] = PB[1]*keids[5] + xi2*ovidp[5*3+1]; keidp[5*3+2] = PB[2]*keids[5] + xi2*ovidp[5*3+2] + zeta2*keips[0]; for ( i=0; i<6*3; i++ ) naidp[i] = ZERO; for ( kat=0; kat<(*nat); kat++ ) { dq = (double)atomic_number[kat]; PC[0] = P[0]-atom_x[kat]; PC[1] = P[1]-atom_y[kat]; PC[2] = P[2]-atom_z[kat]; PC2 = PC[0]*PC[0] + PC[1]*PC[1] + PC[2]*PC[2]; U = zeta * PC2; { int it0, pos; double dT, t_inv, st_inv, dT2, dT3, cnass; cnass = dq*css; if ( U < 42e0 ) { it0 = (int)(0.5e0 + U * FMT_fmt_inv_step_size); dT = it0 * FMT_fmt_step_size - U; dT2 = dT * _inv2_; dT3 = dT * _inv3_; pos = it0 * (3+4); naiss[0] = cnass*(((FMT_fmt_table3[pos+3] * dT3 + FMT_fmt_table3[pos+2] ) * dT2 + FMT_fmt_table3[pos+1] ) * dT + FMT_fmt_table3[pos+0] ); naiss[1] = cnass*(((FMT_fmt_table3[pos+4] * dT3 + FMT_fmt_table3[pos+3] ) * dT2 + FMT_fmt_table3[pos+2] ) * dT + FMT_fmt_table3[pos+1] ); naiss[2] = cnass*(((FMT_fmt_table3[pos+5] * dT3 + FMT_fmt_table3[pos+4] ) * dT2 + FMT_fmt_table3[pos+3] ) * dT + FMT_fmt_table3[pos+2] ); naiss[3] = cnass*(((FMT_fmt_table3[pos+6] * dT3 + FMT_fmt_table3[pos+5] ) * dT2 + FMT_fmt_table3[pos+4] ) * dT + FMT_fmt_table3[pos+3] ); } else { st_inv = sqrt( 0.5e0 / U ); t_inv = st_inv * st_inv; naiss[0] = cnass * _spi2_ * st_inv; naiss[1] = t_inv * naiss[0]; naiss[2] = 3.e0 * t_inv * naiss[1]; naiss[3] = 5.e0 * t_inv * naiss[2]; } } //fmt( naiss, 3, U, dq*css ); // -- (P|A(0)|S) -- for ( m=0; 
m<=2; m++) { naips[m][0] = PA[0]*naiss[m] - PC[0]*naiss[m+1]; naips[m][1] = PA[1]*naiss[m] - PC[1]*naiss[m+1]; naips[m][2] = PA[2]*naiss[m] - PC[2]*naiss[m+1]; } // -- (D|A(0)|S) -- for (m=0; m<=1; m++) { tmp = naiss[m] - naiss[m+1]; naids[m][0] = PA[0]*naips[m][0]-PC[0]*naips[m+1][0] + zeta2*tmp; naids[m][1] = PA[1]*naips[m][1]-PC[1]*naips[m+1][1] + zeta2*tmp; naids[m][2] = PA[2]*naips[m][2]-PC[2]*naips[m+1][2] + zeta2*tmp; naids[m][3] = PA[0]*naips[m][1]-PC[0]*naips[m+1][1]; naids[m][4] = PA[1]*naips[m][2]-PC[1]*naips[m+1][2]; naids[m][5] = PA[2]*naips[m][0]-PC[2]*naips[m+1][0]; } // -- (D|A|P) -- tmpps[0] = naips[0][0] - naips[1][0]; tmpps[1] = naips[0][1] - naips[1][1]; tmpps[2] = naips[0][2] - naips[1][2]; naidp[0*3+0] += PB[0]*naids[0][0] - PC[0]*naids[1][0] + zi*tmpps[0]; naidp[0*3+1] += PB[1]*naids[0][0] - PC[1]*naids[1][0]; naidp[0*3+2] += PB[2]*naids[0][0] - PC[2]*naids[1][0]; naidp[1*3+0] += PB[0]*naids[0][1] - PC[0]*naids[1][1]; naidp[1*3+1] += PB[1]*naids[0][1] - PC[1]*naids[1][1] + zi*tmpps[1]; naidp[1*3+2] += PB[2]*naids[0][1] - PC[2]*naids[1][1]; naidp[2*3+0] += PB[0]*naids[0][2] - PC[0]*naids[1][2]; naidp[2*3+1] += PB[1]*naids[0][2] - PC[1]*naids[1][2]; naidp[2*3+2] += PB[2]*naids[0][2] - PC[2]*naids[1][2] + zi*tmpps[2]; naidp[3*3+0] += PB[0]*naids[0][3] - PC[0]*naids[1][3] + zeta2*tmpps[1]; naidp[3*3+1] += PB[1]*naids[0][3] - PC[1]*naids[1][3] + zeta2*tmpps[0]; naidp[3*3+2] += PB[2]*naids[0][3] - PC[2]*naids[1][3]; naidp[4*3+0] += PB[0]*naids[0][4] - PC[0]*naids[1][4]; naidp[4*3+1] += PB[1]*naids[0][4] - PC[1]*naids[1][4] + zeta2*tmpps[2]; naidp[4*3+2] += PB[2]*naids[0][4] - PC[2]*naids[1][4] + zeta2*tmpps[1]; naidp[5*3+0] += PB[0]*naids[0][5] - PC[0]*naids[1][5] + zeta2*tmpps[2]; naidp[5*3+1] += PB[1]*naids[0][5] - PC[1]*naids[1][5]; naidp[5*3+2] += PB[2]*naids[0][5] - PC[2]*naids[1][5] + zeta2*tmpps[0]; } // contraction for ( i=0; i<6*3; i++ ) { OVI[i] += ovidp[i]; HCORE[i] += (keidp[i] - naidp[i]); } } // jps } // ips for ( i=3; i<6; i++ ) { for ( j=0; j<3; j++ ) { ij = i*3+j; OVI[ij] *= sqr3; HCORE[ij] *= sqr3; } } } /** 1つのCS対のDDタイプの1電子積分を計算する * @ingroup core-oneint * */ void oneint_core_dd__( const int *ips0, const int *nps_i, const double A[3], const int *jps0, const int *nps_j, const double B[3], const double prim_exp[], const double prim_coe[], const int *nat, const double atom_x[], const double atom_y[], const double atom_z[], const int atomic_number[], double OVI[], double HCORE[] ) { int i, j, ij, m, ips, jps, kat, ips1, jps1; double zeta_a, zeta_b, zeta, coef_a, coef_b, coef; double sqrzi, zi, xiza, xizb, xi, xiab2, xi2, exp_ab, css, zeta2; double oviss, keiss, tmp, tmpps[3], tmppp[3*3], tmpds[6]; double BA[3], P[3], PA[3], PC[3], PB[3], AB2; double keips[3], keids[6], keipp[3*3], keidp[6*3], keidd[6*6]; double ovips[3], ovids[6], ovipp[3*3], ovidp[6*3], ovidd[6*6]; double PC2, dq, U; double naiss[4+1], naips[3+1][3], naids[2+1][6], naidp[1+1][6*3]; double naipp[1+1][3*3], naidd[6*6]; double _pi32_ = _PI_32_, _2pi_ = _2PI_; double sqr3, coe_a, coe; sqr3 = sqrt(3.e0); for ( i=0; i<6*6; i++ ) OVI[i] = HCORE[i] = ZERO; AB2 = ZERO; for ( i=0; i<3; i++ ) { BA[i] = B[i]-A[i]; AB2 += BA[i]*BA[i]; } ips1 = *ips0 + (*nps_i); jps1 = *jps0 + (*nps_j); for ( ips=(*ips0); ips<ips1; ips++ ) { zeta_a = prim_exp[ips]; coef_a = prim_coe[ips]; for ( jps=(*jps0); jps<jps1; jps++ ) { zeta_b = prim_exp[jps]; coef_b = prim_coe[jps]; zeta = zeta_a + zeta_b; sqrzi = sqrt( 1.e0 / zeta ); zi = sqrzi * sqrzi; coef = coef_a * coef_b; xiza = zeta_b * zi; xizb = zeta_a * zi; xi = xiza * 
zeta_a; xiab2 = xi * AB2; xi2 = 2.e0 * xi; zeta2 = HALF * zi; exp_ab = coef * exp( -xiab2 ); oviss = _pi32_ * zi * sqrzi * exp_ab; keiss = xi * ( 3.e0 - 2.e0 * xiab2 ) * oviss; css = _2pi_ * zi * exp_ab; if ( fabs(css) < EPS_PS_PAIR_NAI ) continue; for ( i=0; i<3; i++ ) { P[i] = xiza*BA[i] + A[i]; PA[i] = xiza*BA[i]; PB[i] = xiza*BA[i] - BA[i]; } // overlap integral // -- (P||S) -- ovips[0] = PA[0]*oviss; ovips[1] = PA[1]*oviss; ovips[2] = PA[2]*oviss; // -- (D||S) -- ovids[0] = PA[0]*ovips[0] + zeta2*oviss; ovids[1] = PA[1]*ovips[1] + zeta2*oviss; ovids[2] = PA[2]*ovips[2] + zeta2*oviss; ovids[3] = PA[0]*ovips[1]; ovids[4] = PA[1]*ovips[2]; ovids[5] = PA[2]*ovips[0]; // -- (P||P) -- ovipp[0*3+0] = PB[0]*ovips[0] + zeta2*oviss; ovipp[0*3+1] = PB[1]*ovips[0]; ovipp[0*3+2] = PB[2]*ovips[0]; ovipp[1*3+0] = PB[0]*ovips[1]; ovipp[1*3+1] = PB[1]*ovips[1] + zeta2*oviss; ovipp[1*3+2] = PB[2]*ovips[1]; ovipp[2*3+0] = PB[0]*ovips[2]; ovipp[2*3+1] = PB[1]*ovips[2]; ovipp[2*3+2] = PB[2]*ovips[2] + zeta2*oviss; // -- (D||P) -- ovidp[0*3+0] = PB[0]*ovids[0] + zi *ovips[0]; ovidp[0*3+1] = PB[1]*ovids[0]; ovidp[0*3+2] = PB[2]*ovids[0]; ovidp[1*3+0] = PB[0]*ovids[1]; ovidp[1*3+1] = PB[1]*ovids[1] + zi *ovips[1]; ovidp[1*3+2] = PB[2]*ovids[1]; ovidp[2*3+0] = PB[0]*ovids[2]; ovidp[2*3+1] = PB[1]*ovids[2]; ovidp[2*3+2] = PB[2]*ovids[2] + zi *ovips[2]; ovidp[3*3+0] = PB[0]*ovids[3] + zeta2*ovips[1]; ovidp[3*3+1] = PB[1]*ovids[3] + zeta2*ovips[0]; ovidp[3*3+2] = PB[2]*ovids[3]; ovidp[4*3+0] = PB[0]*ovids[4]; ovidp[4*3+1] = PB[1]*ovids[4] + zeta2*ovips[2]; ovidp[4*3+2] = PB[2]*ovids[4] + zeta2*ovips[1]; ovidp[5*3+0] = PB[0]*ovids[5] + zeta2*ovips[2]; ovidp[5*3+1] = PB[1]*ovids[5]; ovidp[5*3+2] = PB[2]*ovids[5] + zeta2*ovips[0]; // -- (D||D) -- ovidd[0*6+0] = PB[0]*ovidp[0*3+0] + zeta2*ovids[0] + zi*ovipp[0*3+0]; ovidd[0*6+1] = PB[1]*ovidp[0*3+1] + zeta2*ovids[0]; ovidd[0*6+2] = PB[2]*ovidp[0*3+2] + zeta2*ovids[0]; ovidd[0*6+3] = PB[0]*ovidp[0*3+1] + zi*ovipp[0*3+1]; ovidd[0*6+4] = PB[1]*ovidp[0*3+2]; ovidd[0*6+5] = PB[2]*ovidp[0*3+0]; ovidd[1*6+0] = PB[0]*ovidp[1*3+0] + zeta2*ovids[1]; ovidd[1*6+1] = PB[1]*ovidp[1*3+1] + zeta2*ovids[1] + zi*ovipp[1*3+1]; ovidd[1*6+2] = PB[2]*ovidp[1*3+2] + zeta2*ovids[1]; ovidd[1*6+3] = PB[0]*ovidp[1*3+1]; ovidd[1*6+4] = PB[1]*ovidp[1*3+2] + zi*ovipp[1*3+2]; ovidd[1*6+5] = PB[2]*ovidp[1*3+0]; ovidd[2*6+0] = PB[0]*ovidp[2*3+0] + zeta2*ovids[2]; ovidd[2*6+1] = PB[1]*ovidp[2*3+1] + zeta2*ovids[2]; ovidd[2*6+2] = PB[2]*ovidp[2*3+2] + zeta2*ovids[2] + zi*ovipp[2*3+2]; ovidd[2*6+3] = PB[0]*ovidp[2*3+1]; ovidd[2*6+4] = PB[1]*ovidp[2*3+2]; ovidd[2*6+5] = PB[2]*ovidp[2*3+0] + zi*ovipp[2*3+0]; ovidd[3*6+0] = PB[0]*ovidp[3*3+0] + zeta2*ovids[3] + zeta2*ovipp[1*3+0]; ovidd[3*6+1] = PB[1]*ovidp[3*3+1] + zeta2*ovids[3] + zeta2*ovipp[0*3+1]; ovidd[3*6+2] = PB[2]*ovidp[3*3+2] + zeta2*ovids[3]; ovidd[3*6+3] = PB[0]*ovidp[3*3+1] + zeta2*ovipp[1*3+1]; ovidd[3*6+4] = PB[1]*ovidp[3*3+2] + zeta2*ovipp[0*3+2]; ovidd[3*6+5] = PB[2]*ovidp[3*3+0]; ovidd[4*6+0] = PB[0]*ovidp[4*3+0] + zeta2*ovids[4]; ovidd[4*6+1] = PB[1]*ovidp[4*3+1] + zeta2*ovids[4] + zeta2*ovipp[2*3+1]; ovidd[4*6+2] = PB[2]*ovidp[4*3+2] + zeta2*ovids[4] + zeta2*ovipp[1*3+2]; ovidd[4*6+3] = PB[0]*ovidp[4*3+1]; ovidd[4*6+4] = PB[1]*ovidp[4*3+2] + zeta2*ovipp[2*3+2]; ovidd[4*6+5] = PB[2]*ovidp[4*3+0] + zeta2*ovipp[1*3+0]; ovidd[5*6+0] = PB[0]*ovidp[5*3+0] + zeta2*ovids[5] + zeta2*ovipp[2*3+0]; ovidd[5*6+1] = PB[1]*ovidp[5*3+1] + zeta2*ovids[5]; ovidd[5*6+2] = PB[2]*ovidp[5*3+2] + zeta2*ovids[5] + zeta2*ovipp[0*3+2]; ovidd[5*6+3] = 
PB[0]*ovidp[5*3+1] + zeta2*ovipp[2*3+1]; ovidd[5*6+4] = PB[1]*ovidp[5*3+2]; ovidd[5*6+5] = PB[2]*ovidp[5*3+0] + zeta2*ovipp[0*3+0]; // kinetic energy integral // -- (P|T|S) -- keips[0] = PA[0]*keiss + xi2*ovips[0]; keips[1] = PA[1]*keiss + xi2*ovips[1]; keips[2] = PA[2]*keiss + xi2*ovips[2]; // -- (D|T|S) -- tmp = zeta2*keiss - xiza*oviss; keids[0] = PA[0]*keips[0] + xi2*ovids[0] + tmp; keids[1] = PA[1]*keips[1] + xi2*ovids[1] + tmp; keids[2] = PA[2]*keips[2] + xi2*ovids[2] + tmp; keids[3] = PA[0]*keips[1] + xi2*ovids[3]; keids[4] = PA[1]*keips[2] + xi2*ovids[4]; keids[5] = PA[2]*keips[0] + xi2*ovids[5]; // -- (P|T|P) -- keipp[0*3+0]=PB[0]*keips[0]+xi2*ovipp[0*3+0]+zeta2*keiss; keipp[0*3+1]=PB[1]*keips[0]+xi2*ovipp[0*3+1]; keipp[0*3+2]=PB[2]*keips[0]+xi2*ovipp[0*3+2]; keipp[1*3+0]=PB[0]*keips[1]+xi2*ovipp[1*3+0]; keipp[1*3+1]=PB[1]*keips[1]+xi2*ovipp[1*3+1]+zeta2*keiss; keipp[1*3+2]=PB[2]*keips[1]+xi2*ovipp[1*3+2]; keipp[2*3+0]=PB[0]*keips[2]+xi2*ovipp[2*3+0]; keipp[2*3+1]=PB[1]*keips[2]+xi2*ovipp[2*3+1]; keipp[2*3+2]=PB[2]*keips[2]+xi2*ovipp[2*3+2]+zeta2*keiss; // -- (D|T|P) -- keidp[0*3+0]=PB[0]*keids[0]+xi2*ovidp[0*3+0]+zi*keips[0]; keidp[0*3+1]=PB[1]*keids[0]+xi2*ovidp[0*3+1]; keidp[0*3+2]=PB[2]*keids[0]+xi2*ovidp[0*3+2]; keidp[1*3+0]=PB[0]*keids[1]+xi2*ovidp[1*3+0]; keidp[1*3+1]=PB[1]*keids[1]+xi2*ovidp[1*3+1]+zi*keips[1]; keidp[1*3+2]=PB[2]*keids[1]+xi2*ovidp[1*3+2]; keidp[2*3+0]=PB[0]*keids[2]+xi2*ovidp[2*3+0]; keidp[2*3+1]=PB[1]*keids[2]+xi2*ovidp[2*3+1]; keidp[2*3+2]=PB[2]*keids[2]+xi2*ovidp[2*3+2]+zi*keips[2]; keidp[3*3+0]=PB[0]*keids[3]+xi2*ovidp[3*3+0] +zeta2*keips[1]; keidp[3*3+1]=PB[1]*keids[3]+xi2*ovidp[3*3+1] +zeta2*keips[0]; keidp[3*3+2]=PB[2]*keids[3]+xi2*ovidp[3*3+2]; keidp[4*3+0]=PB[0]*keids[4]+xi2*ovidp[4*3+0]; keidp[4*3+1]=PB[1]*keids[4]+xi2*ovidp[4*3+1] +zeta2*keips[2]; keidp[4*3+2]=PB[2]*keids[4]+xi2*ovidp[4*3+2] +zeta2*keips[1]; keidp[5*3+0]=PB[0]*keids[5]+xi2*ovidp[5*3+0] +zeta2*keips[2]; keidp[5*3+1]=PB[1]*keids[5]+xi2*ovidp[5*3+1]; keidp[5*3+2]=PB[2]*keids[5]+xi2*ovidp[5*3+2] +zeta2*keips[0]; // -- (D|T|D) -- tmpds[0] = zeta2*keids[0] - xizb*ovids[0]; tmpds[1] = zeta2*keids[1] - xizb*ovids[1]; tmpds[2] = zeta2*keids[2] - xizb*ovids[2]; tmpds[3] = zeta2*keids[3] - xizb*ovids[3]; tmpds[4] = zeta2*keids[4] - xizb*ovids[4]; tmpds[5] = zeta2*keids[5] - xizb*ovids[5]; keidd[0*6+0] = PB[0]*keidp[0*3+0]+xi2*ovidd[0*6+0] + tmpds[0]+zi*keipp[0*3+0]; keidd[0*6+1] = PB[1]*keidp[0*3+1]+xi2*ovidd[0*6+1]+tmpds[0]; keidd[0*6+2] = PB[2]*keidp[0*3+2]+xi2*ovidd[0*6+2]+tmpds[0]; keidd[0*6+3] = PB[0]*keidp[0*3+1]+xi2*ovidd[0*6+3] +zi*keipp[0*3+1]; keidd[0*6+4] = PB[1]*keidp[0*3+2]+xi2*ovidd[0*6+4]; keidd[0*6+5] = PB[2]*keidp[0*3+0]+xi2*ovidd[0*6+5]; keidd[1*6+0] = PB[0]*keidp[1*3+0]+xi2*ovidd[1*6+0]+tmpds[1]; keidd[1*6+1] = PB[1]*keidp[1*3+1]+xi2*ovidd[1*6+1] + tmpds[1]+zi*keipp[1*3+1]; keidd[1*6+2] = PB[2]*keidp[1*3+2]+xi2*ovidd[1*6+2]+tmpds[1]; keidd[1*6+3] = PB[0]*keidp[1*3+1]+xi2*ovidd[1*6+3]; keidd[1*6+4] = PB[1]*keidp[1*3+2]+xi2*ovidd[1*6+4] +zi*keipp[1*3+2]; keidd[1*6+5] = PB[2]*keidp[1*3+0]+xi2*ovidd[1*6+5]; keidd[2*6+0] = PB[0]*keidp[2*3+0]+xi2*ovidd[2*6+0]+tmpds[2]; keidd[2*6+1] = PB[1]*keidp[2*3+1]+xi2*ovidd[2*6+1]+tmpds[2]; keidd[2*6+2] = PB[2]*keidp[2*3+2]+xi2*ovidd[2*6+2] + tmpds[2]+zi*keipp[2*3+2]; keidd[2*6+3] = PB[0]*keidp[2*3+1]+xi2*ovidd[2*6+3]; keidd[2*6+4] = PB[1]*keidp[2*3+2]+xi2*ovidd[2*6+4]; keidd[2*6+5] = PB[2]*keidp[2*3+0]+xi2*ovidd[2*6+5] +zi*keipp[2*3+0]; keidd[3*6+0] = PB[0]*keidp[3*3+0]+xi2*ovidd[3*6+0] + tmpds[3]+zeta2*keipp[1*3+0]; keidd[3*6+1] = 
PB[1]*keidp[3*3+1]+xi2*ovidd[3*6+1] + tmpds[3]+zeta2*keipp[0*3+1]; keidd[3*6+2] = PB[2]*keidp[3*3+2]+xi2*ovidd[3*6+2]+tmpds[3]; keidd[3*6+3] = PB[0]*keidp[3*3+1]+xi2*ovidd[3*6+3] +zeta2*keipp[1*3+1]; keidd[3*6+4] = PB[1]*keidp[3*3+2]+xi2*ovidd[3*6+4] +zeta2*keipp[0*3+2]; keidd[3*6+5] = PB[2]*keidp[3*3+0]+xi2*ovidd[3*6+5]; keidd[4*6+0] = PB[0]*keidp[4*3+0]+xi2*ovidd[4*6+0]+tmpds[4]; keidd[4*6+1] = PB[1]*keidp[4*3+1]+xi2*ovidd[4*6+1] + tmpds[4]+zeta2*keipp[2*3+1]; keidd[4*6+2] = PB[2]*keidp[4*3+2]+xi2*ovidd[4*6+2] + tmpds[4]+zeta2*keipp[1*3+2]; keidd[4*6+3] = PB[0]*keidp[4*3+1]+xi2*ovidd[4*6+3]; keidd[4*6+4] = PB[1]*keidp[4*3+2]+xi2*ovidd[4*6+4] +zeta2*keipp[2*3+2]; keidd[4*6+5] = PB[2]*keidp[4*3+0]+xi2*ovidd[4*6+5] +zeta2*keipp[1*3+0]; keidd[5*6+0] = PB[0]*keidp[5*3+0]+xi2*ovidd[5*6+0] + tmpds[5]+zeta2*keipp[2*3+0]; keidd[5*6+1] = PB[1]*keidp[5*3+1]+xi2*ovidd[5*6+1]+tmpds[5]; keidd[5*6+2] = PB[2]*keidp[5*3+2]+xi2*ovidd[5*6+2] + tmpds[5]+zeta2*keipp[0*3+2]; keidd[5*6+3] = PB[0]*keidp[5*3+1]+xi2*ovidd[5*6+3] +zeta2*keipp[2*3+1]; keidd[5*6+4] = PB[1]*keidp[5*3+2]+xi2*ovidd[5*6+4]; keidd[5*6+5] = PB[2]*keidp[5*3+0]+xi2*ovidd[5*6+5] +zeta2*keipp[0*3+0]; for ( i=0; i<6*6; i++ ) naidd[i] = ZERO; for ( kat=0; kat<(*nat); kat++ ) { dq = (double)atomic_number[kat]; PC[0] = P[0]-atom_x[kat]; PC[1] = P[1]-atom_y[kat]; PC[2] = P[2]-atom_z[kat]; PC2 = PC[0]*PC[0] + PC[1]*PC[1] + PC[2]*PC[2]; U = zeta * PC2; { int it0, pos; double dT, t_inv, st_inv, dT2, dT3, cnass; cnass = dq*css; if ( U < 44e0 ) { it0 = (int)(0.5e0 + U * FMT_fmt_inv_step_size); dT = it0 * FMT_fmt_step_size - U; dT2 = dT * _inv2_; dT3 = dT * _inv3_; pos = it0 * (4+4); naiss[0] = cnass*(((FMT_fmt_table4[pos+3] * dT3 + FMT_fmt_table4[pos+2] ) * dT2 + FMT_fmt_table4[pos+1] ) * dT + FMT_fmt_table4[pos+0] ); naiss[1] = cnass*(((FMT_fmt_table4[pos+4] * dT3 + FMT_fmt_table4[pos+3] ) * dT2 + FMT_fmt_table4[pos+2] ) * dT + FMT_fmt_table4[pos+1] ); naiss[2] = cnass*(((FMT_fmt_table4[pos+5] * dT3 + FMT_fmt_table4[pos+4] ) * dT2 + FMT_fmt_table4[pos+3] ) * dT + FMT_fmt_table4[pos+2] ); naiss[3] = cnass*(((FMT_fmt_table4[pos+6] * dT3 + FMT_fmt_table4[pos+5] ) * dT2 + FMT_fmt_table4[pos+4] ) * dT + FMT_fmt_table4[pos+3] ); naiss[4] = cnass*(((FMT_fmt_table4[pos+7] * dT3 + FMT_fmt_table4[pos+6] ) * dT2 + FMT_fmt_table4[pos+5] ) * dT + FMT_fmt_table4[pos+4] ); } else { st_inv = sqrt( 0.5e0 / U ); t_inv = st_inv * st_inv; naiss[0] = cnass * _spi2_ * st_inv; naiss[1] = t_inv * naiss[0]; naiss[2] = 3.e0 * t_inv * naiss[1]; naiss[3] = 5.e0 * t_inv * naiss[2]; naiss[4] = 7.e0 * t_inv * naiss[3]; } } //fmt( naiss, 4, U, dq*css ); // -- (P|A(0)|S) -- for ( m=0; m<=3; m++) { naips[m][0] = PA[0]*naiss[m] - PC[0]*naiss[m+1]; naips[m][1] = PA[1]*naiss[m] - PC[1]*naiss[m+1]; naips[m][2] = PA[2]*naiss[m] - PC[2]*naiss[m+1]; } // -- (D|A(0)|S) -- for (m=0; m<=2; m++) { tmp = naiss[m] - naiss[m+1]; naids[m][0] = PA[0]*naips[m][0]-PC[0]*naips[m+1][0] + zeta2*tmp; naids[m][1] = PA[1]*naips[m][1]-PC[1]*naips[m+1][1] + zeta2*tmp; naids[m][2] = PA[2]*naips[m][2]-PC[2]*naips[m+1][2] + zeta2*tmp; naids[m][3] = PA[0]*naips[m][1]-PC[0]*naips[m+1][1]; naids[m][4] = PA[1]*naips[m][2]-PC[1]*naips[m+1][2]; naids[m][5] = PA[2]*naips[m][0]-PC[2]*naips[m+1][0]; } // -- (P|A|P) -- for (m=0; m<=1; m++) { tmp = naiss[m] - naiss[m+1]; naipp[m][0*3+0]=PB[0]*naips[m][0]-PC[0]*naips[m+1][0] +zeta2*tmp; naipp[m][0*3+1]=PB[1]*naips[m][0]-PC[1]*naips[m+1][0]; naipp[m][0*3+2]=PB[2]*naips[m][0]-PC[2]*naips[m+1][0]; naipp[m][1*3+0]=PB[0]*naips[m][1]-PC[0]*naips[m+1][1]; 
naipp[m][1*3+1]=PB[1]*naips[m][1]-PC[1]*naips[m+1][1] +zeta2*tmp; naipp[m][1*3+2]=PB[2]*naips[m][1]-PC[2]*naips[m+1][1]; naipp[m][2*3+0]=PB[0]*naips[m][2]-PC[0]*naips[m+1][2]; naipp[m][2*3+1]=PB[1]*naips[m][2]-PC[1]*naips[m+1][2]; naipp[m][2*3+2]=PB[2]*naips[m][2]-PC[2]*naips[m+1][2] +zeta2*tmp; } // -- (D|A|P) -- for (m=0; m<=1; m++) { tmpps[0] = naips[m][0]-naips[m+1][0]; tmpps[1] = naips[m][1]-naips[m+1][1]; tmpps[2] = naips[m][2]-naips[m+1][2]; naidp[m][0*3+0]=PB[0]*naids[m][0]-PC[0]*naids[m+1][0] +zi*tmpps[0]; naidp[m][0*3+1]=PB[1]*naids[m][0]-PC[1]*naids[m+1][0]; naidp[m][0*3+2]=PB[2]*naids[m][0]-PC[2]*naids[m+1][0]; naidp[m][1*3+0]=PB[0]*naids[m][1]-PC[0]*naids[m+1][1]; naidp[m][1*3+1]=PB[1]*naids[m][1]-PC[1]*naids[m+1][1] +zi*tmpps[1]; naidp[m][1*3+2]=PB[2]*naids[m][1]-PC[2]*naids[m+1][1]; naidp[m][2*3+0]=PB[0]*naids[m][2]-PC[0]*naids[m+1][2]; naidp[m][2*3+1]=PB[1]*naids[m][2]-PC[1]*naids[m+1][2]; naidp[m][2*3+2]=PB[2]*naids[m][2]-PC[2]*naids[m+1][2] +zi*tmpps[2]; naidp[m][3*3+0]=PB[0]*naids[m][3]-PC[0]*naids[m+1][3] + zeta2*tmpps[1]; naidp[m][3*3+1]=PB[1]*naids[m][3]-PC[1]*naids[m+1][3] +zeta2*tmpps[0]; naidp[m][3*3+2]=PB[2]*naids[m][3]-PC[2]*naids[m+1][3]; naidp[m][4*3+0]=PB[0]*naids[m][4]-PC[0]*naids[m+1][4]; naidp[m][4*3+1]=PB[1]*naids[m][4]-PC[1]*naids[m+1][4] +zeta2*tmpps[2]; naidp[m][4*3+2]=PB[2]*naids[m][4]-PC[2]*naids[m+1][4] +zeta2*tmpps[1]; naidp[m][5*3+0]=PB[0]*naids[m][5]-PC[0]*naids[m+1][5] +zeta2*tmpps[2]; naidp[m][5*3+1]=PB[1]*naids[m][5]-PC[1]*naids[m+1][5]; naidp[m][5*3+2]=PB[2]*naids[m][5]-PC[2]*naids[m+1][5] +zeta2*tmpps[0]; } // -- (D|A|D) -- tmpds[0]=naids[0][0]-naids[1][0]; tmpds[1]=naids[0][1]-naids[1][1]; tmpds[2]=naids[0][2]-naids[1][2]; tmpds[3]=naids[0][3]-naids[1][3]; tmpds[4]=naids[0][4]-naids[1][4]; tmpds[5]=naids[0][5]-naids[1][5]; tmppp[0*3+0]=naipp[0][0*3+0]-naipp[1][0*3+0]; tmppp[0*3+1]=naipp[0][0*3+1]-naipp[1][0*3+1]; tmppp[0*3+2]=naipp[0][0*3+2]-naipp[1][0*3+2]; tmppp[1*3+0]=naipp[0][1*3+0]-naipp[1][1*3+0]; tmppp[1*3+1]=naipp[0][1*3+1]-naipp[1][1*3+1]; tmppp[1*3+2]=naipp[0][1*3+2]-naipp[1][1*3+2]; tmppp[2*3+0]=naipp[0][2*3+0]-naipp[1][2*3+0]; tmppp[2*3+1]=naipp[0][2*3+1]-naipp[1][2*3+1]; tmppp[2*3+2]=naipp[0][2*3+2]-naipp[1][2*3+2]; naidd[0*6+0]+=PB[0]*naidp[0][0*3+0]-PC[0]*naidp[1][0*3+0] + zeta2*tmpds[0]+zi*tmppp[0*3+0]; naidd[0*6+1]+=PB[1]*naidp[0][0*3+1]-PC[1]*naidp[1][0*3+1] + zeta2*tmpds[0]; naidd[0*6+2]+=PB[2]*naidp[0][0*3+2]-PC[2]*naidp[1][0*3+2] + zeta2*tmpds[0]; naidd[0*6+3]+=PB[0]*naidp[0][0*3+1]-PC[0]*naidp[1][0*3+1] + zi*tmppp[0*3+1]; naidd[0*6+4]+=PB[1]*naidp[0][0*3+2]-PC[1]*naidp[1][0*3+2]; naidd[0*6+5]+=PB[2]*naidp[0][0*3+0]-PC[2]*naidp[1][0*3+0]; naidd[1*6+0]+=PB[0]*naidp[0][1*3+0]-PC[0]*naidp[1][1*3+0] + zeta2*tmpds[1]; naidd[1*6+1]+=PB[1]*naidp[0][1*3+1]-PC[1]*naidp[1][1*3+1] + zeta2*tmpds[1]+zi*tmppp[1*3+1]; naidd[1*6+2]+=PB[2]*naidp[0][1*3+2]-PC[2]*naidp[1][1*3+2] + zeta2*tmpds[1]; naidd[1*6+3]+=PB[0]*naidp[0][1*3+1]-PC[0]*naidp[1][1*3+1]; naidd[1*6+4]+=PB[1]*naidp[0][1*3+2]-PC[1]*naidp[1][1*3+2] + zi*tmppp[1*3+2]; naidd[1*6+5]+=PB[2]*naidp[0][1*3+0]-PC[2]*naidp[1][1*3+0]; naidd[2*6+0]+=PB[0]*naidp[0][2*3+0]-PC[0]*naidp[1][2*3+0] + zeta2*tmpds[2]; naidd[2*6+1]+=PB[1]*naidp[0][2*3+1]-PC[1]*naidp[1][2*3+1] + zeta2*tmpds[2]; naidd[2*6+2]+=PB[2]*naidp[0][2*3+2]-PC[2]*naidp[1][2*3+2] + zeta2*tmpds[2]+zi*tmppp[2*3+2]; naidd[2*6+3]+=PB[0]*naidp[0][2*3+1]-PC[0]*naidp[1][2*3+1]; naidd[2*6+4]+=PB[1]*naidp[0][2*3+2]-PC[1]*naidp[1][2*3+2]; naidd[2*6+5]+=PB[2]*naidp[0][2*3+0]-PC[2]*naidp[1][2*3+0] + zi*tmppp[2*3+0]; 
naidd[3*6+0]+=PB[0]*naidp[0][3*3+0]-PC[0]*naidp[1][3*3+0] + zeta2*tmpds[3]+ zeta2*tmppp[1*3+0]; naidd[3*6+1]+=PB[1]*naidp[0][3*3+1]-PC[1]*naidp[1][3*3+1] + zeta2*tmpds[3]+ zeta2*tmppp[0*3+1]; naidd[3*6+2]+=PB[2]*naidp[0][3*3+2]-PC[2]*naidp[1][3*3+2] + zeta2*tmpds[3]; naidd[3*6+3]+=PB[0]*naidp[0][3*3+1]-PC[0]*naidp[1][3*3+1] + zeta2*tmppp[1*3+1]; naidd[3*6+4]+=PB[1]*naidp[0][3*3+2]-PC[1]*naidp[1][3*3+2] + zeta2*tmppp[0*3+2]; naidd[3*6+5]+=PB[2]*naidp[0][3*3+0]-PC[2]*naidp[1][3*3+0]; naidd[4*6+0]+=PB[0]*naidp[0][4*3+0]-PC[0]*naidp[1][4*3+0] + zeta2*tmpds[4]; naidd[4*6+1]+=PB[1]*naidp[0][4*3+1]-PC[1]*naidp[1][4*3+1] + zeta2*tmpds[4]+ zeta2*tmppp[2*3+1]; naidd[4*6+2]+=PB[2]*naidp[0][4*3+2]-PC[2]*naidp[1][4*3+2] + zeta2*tmpds[4]+ zeta2*tmppp[1*3+2]; naidd[4*6+3]+=PB[0]*naidp[0][4*3+1]-PC[0]*naidp[1][4*3+1]; naidd[4*6+4]+=PB[1]*naidp[0][4*3+2]-PC[1]*naidp[1][4*3+2] + zeta2*tmppp[2*3+2]; naidd[4*6+5]+=PB[2]*naidp[0][4*3+0]-PC[2]*naidp[1][4*3+0] + zeta2*tmppp[1*3+0]; naidd[5*6+0]+=PB[0]*naidp[0][5*3+0]-PC[0]*naidp[1][5*3+0] + zeta2*tmpds[5]+ zeta2*tmppp[2*3+0]; naidd[5*6+1]+=PB[1]*naidp[0][5*3+1]-PC[1]*naidp[1][5*3+1] + zeta2*tmpds[5]; naidd[5*6+2]+=PB[2]*naidp[0][5*3+2]-PC[2]*naidp[1][5*3+2] + zeta2*tmpds[5]+ zeta2*tmppp[0*3+2]; naidd[5*6+3]+=PB[0]*naidp[0][5*3+1]-PC[0]*naidp[1][5*3+1] + zeta2*tmppp[2*3+1]; naidd[5*6+4]+=PB[1]*naidp[0][5*3+2]-PC[1]*naidp[1][5*3+2]; naidd[5*6+5]+=PB[2]*naidp[0][5*3+0]-PC[2]*naidp[1][5*3+0] + zeta2*tmppp[0*3+0]; } // contraction for ( i=0; i<6*6; i++ ) { OVI[i] += ovidd[i]; HCORE[i] += (keidd[i]-naidd[i]); } } // jps } // ips ij = 0; for ( i=0; i<6; i++ ) { coe_a = (i<3? 1.e0 : sqr3 ); for ( j=0; j<6; j++ ) { coe = coe_a * (j<3? 1.e0 : sqr3 ); OVI[ij] *= coe; HCORE[ij] *= coe; ij++; } } }
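/* Added commentary (not part of the original file): the keidd/naidd blocks above assemble
 * (d|T|d) kinetic and (d|V|d) nuclear-attraction integrals by Obara-Saika-style recursion,
 * consuming Boys-function values F_m(U) in naiss[0..4] either from the Taylor-interpolated
 * FMT_fmt_table4 (U < 44) or from the large-U asymptotic branch.  A minimal reference
 * evaluator is sketched below for cross-checking the tabulated path; boys_reference() is a
 * hypothetical helper, and its upward recurrence loses accuracy for very small T, so treat
 * it as a sanity check rather than a production routine. */
#include <math.h>
/* F_m(T) = integral_0^1 t^(2m) exp(-T t^2) dt */
static void boys_reference(double T, int mmax, double *F)
{
    int m;
    if (T < 1e-12) { /* limiting value: F_m(0) = 1/(2m+1) */
        for (m = 0; m <= mmax; m++) F[m] = 1.0 / (2*m + 1);
        return;
    }
    F[0] = 0.5 * sqrt(M_PI / T) * erf(sqrt(T)); /* closed form for m = 0 */
    for (m = 0; m < mmax; m++) /* upward recurrence: F_{m+1} = ((2m+1) F_m - exp(-T)) / (2T) */
        F[m+1] = ((2*m + 1) * F[m] - exp(-T)) / (2.0 * T);
}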
ewald.h
#ifndef ewald_h #define ewald_h #include "exafmm.h" namespace exafmm { //! Wave structure for Ewald summation struct Wave { vec3 K; //!< 3-D wave number vector real_t real; //!< real part of wave real_t imag; //!< imaginary part of wave }; typedef std::vector<Wave> Waves; //!< Vector of Wave types static int KSIZE; //!< Number of waves in Ewald summation static real_t ALPHA; //!< Scaling parameter for Ewald summation static real_t SIGMA; //!< Scaling parameter for Ewald summation static real_t CUTOFF; //!< Cutoff distance static vec3 K; //!< Wave number vector static vec3 SCALE; //!< Scale vector //! Forward DFT void dft(Waves & waves, Bodies & bodies) { #pragma omp parallel for for (size_t w=0; w<waves.size(); w++) { waves[w].real = waves[w].imag = 0; for (size_t b=0; b<bodies.size(); b++) { real_t th = sum(waves[w].K * bodies[b].X * SCALE); waves[w].real += bodies[b].q * std::cos(th); waves[w].imag += bodies[b].q * std::sin(th); } } } //! Inverse DFT void idft(Waves & waves, Bodies & bodies) { #pragma omp parallel for for (size_t b=0; b<bodies.size(); b++) { real_t p = 0; vec3 F = 0; for (size_t w=0; w<waves.size(); w++) { real_t th = sum(waves[w].K * bodies[b].X * SCALE); real_t dtmp = waves[w].real * std::sin(th) - waves[w].imag * std::cos(th); p += waves[w].real * std::cos(th) + waves[w].imag * std::sin(th); F -= waves[w].K * dtmp; } F *= SCALE; bodies[b].p += p; bodies[b].F += F; } } //! Initialize wave vector Waves initWaves() { for (int d=0; d<3; d++) SCALE[d] = 2 * M_PI / CYCLE; Waves waves; int kmaxsq = KSIZE * KSIZE; int kmax = KSIZE; for (int l=0; l<=kmax; l++) { int mmin = -kmax; if (l==0) mmin = 0; for (int m=mmin; m<=kmax; m++) { int nmin = -kmax; if (l==0 && m==0) nmin=1; for (int n=nmin; n<=kmax; n++) { real_t ksq = l * l + m * m + n * n; if (ksq <= kmaxsq) { Wave wave; wave.K[0] = l; wave.K[1] = m; wave.K[2] = n; wave.real = wave.imag = 0; waves.push_back(wave); } } } } return waves; } //! Ewald real part P2P kernel void realP2P(Cell * Ci, Cell * Cj, vec3 iX) { for (Body * Bi=Ci->body; Bi!=Ci->body+Ci->numBodies; Bi++) { for (Body * Bj=Cj->body; Bj!=Cj->body+Cj->numBodies; Bj++) { vec3 dX; for (int d=0; d<3; d++) dX[d] = Bi->X[d] - Bj->X[d] - iX[d] * CYCLE; real_t R2 = norm(dX); if (0 < R2 && R2 < CUTOFF * CUTOFF) { real_t R2s = R2 * ALPHA * ALPHA; real_t Rs = std::sqrt(R2s); real_t invRs = 1 / Rs; real_t invR2s = invRs * invRs; real_t invR3s = invR2s * invRs; real_t dtmp = Bj->q * (M_2_SQRTPI * std::exp(-R2s) * invR2s + erfc(Rs) * invR3s); dtmp *= ALPHA * ALPHA * ALPHA; Bi->p += Bj->q * erfc(Rs) * invRs * ALPHA; Bi->F -= dX * dtmp; } } } } //! Get leaf cells void getLeaf(Cells & ileafs, Cell * Ci) { if (Ci->numChilds == 0) ileafs.push_back(*Ci); for (Cell * ci=Ci->child; ci!=Ci->child+Ci->numChilds; ci++) { getLeaf(ileafs, ci); } } //! Find neighbor cell void neighbor(Cell * Ci, Cell * Cj) { vec3 dX = Ci->X - Cj->X; vec3 iX = 0; for (int d=0; d<3; d++) { if(dX[d] < -CYCLE / 2) iX[d]--; if(dX[d] > CYCLE / 2) iX[d]++; dX[d] -= iX[d] * CYCLE; } real_t R = std::sqrt(norm(dX)); if (R - Ci->R - Cj->R < sqrtf(3) * CUTOFF) { if(Cj->numChilds == 0) realP2P(Ci, Cj, iX); for (Cell * cj=Cj->child; cj!=Cj->child+Cj->numChilds; cj++) { neighbor(Ci, cj); } } } //! Ewald real part void realPart(Cells & icells, Cells & jcells) { Cells ileafs; getLeaf(ileafs, &icells[0]); #pragma omp parallel for for (size_t i=0; i<ileafs.size(); i++) { neighbor(&ileafs[i], &jcells[0]); } } //! 
Ewald wave part void wavePart(Bodies & bodies, Bodies & jbodies) { Waves waves = initWaves(); dft(waves,jbodies); real_t coef = 2 / SIGMA / CYCLE / CYCLE / CYCLE; real_t coef2 = 1 / (4 * ALPHA * ALPHA); for (size_t w=0; w<waves.size(); w++) { K = waves[w].K * SCALE; real_t K2 = K[0] * K[0] + K[1] * K[1] + K[2] * K[2]; real_t factor = coef * std::exp(-K2 * coef2) / K2; waves[w].real *= factor; waves[w].imag *= factor; } idft(waves,bodies); } //! Subtract self term void selfTerm(Bodies & bodies) { for (size_t b=0; b<bodies.size(); b++) { bodies[b].p -= M_2_SQRTPI * (bodies[b].q * ALPHA); } } } #endif
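// Added usage sketch (not part of the original header): the three pieces of the
// periodic Coulomb sum are accumulated in-place on bodies[b].p and bodies[b].F.
// The wave-part factor implements (2/(SIGMA*V)) * exp(-k^2/(4*ALPHA^2)) / k^2
// with V = CYCLE^3.  The parameter values below are the ones typically used in
// the exafmm examples and are an assumption here, as is the surrounding
// cells/bodies setup:
//
//   exafmm::KSIZE  = 11;                  // waves per dimension
//   exafmm::ALPHA  = 10 / exafmm::CYCLE;  // real/wave-space split parameter
//   exafmm::SIGMA  = .25 / M_PI;
//   exafmm::CUTOFF = exafmm::CYCLE / 2;   // real-space cutoff
//   exafmm::wavePart(bodies, jbodies);    // reciprocal-space sum
//   exafmm::realPart(cells, jcells);      // short-range erfc sum over leaf pairs
//   exafmm::selfTerm(bodies);             // remove q*ALPHA*2/sqrt(pi) self term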
bicg.c
/** * bicg.c: This file was adapted from PolyBench/GPU 1.0 test suite * to run on GPU with OpenMP 4.0 pragmas and OpenCL driver. * * Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU * * Contacts: Marcio M Pereira <mpereira@ic.unicamp.br> * Rafael Cardoso F Sousa <rafael.cardoso@students.ic.unicamp.br> * Luís Felipe Mattos <ra107822@students.ic.unicamp.br> */ #include <assert.h> #include <math.h> #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef _OPENMP #include <omp.h> #endif #include "BenchmarksUtil.h" // Error threshold for the results "not matching" #define PERCENT_DIFF_ERROR_THRESHOLD 0.7 /* Problem size. */ #ifdef RUN_TEST #define SIZE 1100 #elif RUN_BENCHMARK #define SIZE 9600 #else #define SIZE 1000 #endif #define NX SIZE #define NY SIZE #ifndef M_PI #define M_PI 3.14159 #endif /* Can switch DATA_TYPE between float and double */ typedef float DATA_TYPE; void init_array(DATA_TYPE *A, DATA_TYPE *p, DATA_TYPE *r) { int i, j; for (i = 0; i < NX; i++) { r[i] = i * M_PI; for (j = 0; j < NY; j++) { A[i * NY + j] = ((DATA_TYPE)i * j) / NX; } } for (i = 0; i < NY; i++) { p[i] = i * M_PI; } } int compareResults(DATA_TYPE *s, DATA_TYPE *s_outputFromGpu, DATA_TYPE *q, DATA_TYPE *q_outputFromGpu) { int i, fail; fail = 0; // Compare s with s_cuda for (i = 0; i < NX; i++) { if (percentDiff(q[i], q_outputFromGpu[i]) > PERCENT_DIFF_ERROR_THRESHOLD) { fail++; } } for (i = 0; i < NY; i++) { if (percentDiff(s[i], s_outputFromGpu[i]) > PERCENT_DIFF_ERROR_THRESHOLD) { fail++; } } // print results printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f " "Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail); return fail; } void bicg_cpu(DATA_TYPE *A, DATA_TYPE *r, DATA_TYPE *s, DATA_TYPE *p, DATA_TYPE *q) { int i, j; for (i = 0; i < NY; i++) { s[i] = 0.0; } for (i = 0; i < NX; i++) { q[i] = 0.0; for (j = 0; j < NY; j++) { s[j] = s[j] + r[i] * A[i * NY + j]; q[i] = q[i] + A[i * NY + j] * p[j]; } } } void bicg_OMP(DATA_TYPE *A, DATA_TYPE *r, DATA_TYPE *s, DATA_TYPE *p, DATA_TYPE *q) { int i, j; for (i = 0; i < NY; i++) { s[i] = 0.0; } #pragma omp target device(DEVICE_ID) map( \ to : A[ : NX *NY], p[ : NY], r[ : NX]) map(tofrom : s[ : NY], q[ : NX]) { #pragma omp parallel for collapse(1) for (j = 0; j < NY; j++) { for (i = 0; i < NX; i++) { s[j] = s[j] + r[i] * A[i * NY + j]; } } #pragma omp parallel for collapse(1) for (i = 0; i < NX; i++) { q[i] = 0.0; for (j = 0; j < NY; j++) { q[i] = q[i] + A[i * NY + j] * p[j]; } } } } int main(int argc, char **argv) { double t_start, t_end; int fail = 0; DATA_TYPE *A; DATA_TYPE *r; DATA_TYPE *s; DATA_TYPE *p; DATA_TYPE *q; DATA_TYPE *s_GPU; DATA_TYPE *q_GPU; A = (DATA_TYPE *)malloc(NX * NY * sizeof(DATA_TYPE)); r = (DATA_TYPE *)malloc(NX * sizeof(DATA_TYPE)); s = (DATA_TYPE *)malloc(NY * sizeof(DATA_TYPE)); p = (DATA_TYPE *)malloc(NY * sizeof(DATA_TYPE)); q = (DATA_TYPE *)malloc(NX * sizeof(DATA_TYPE)); s_GPU = (DATA_TYPE *)malloc(NY * sizeof(DATA_TYPE)); q_GPU = (DATA_TYPE *)malloc(NX * sizeof(DATA_TYPE)); fprintf(stdout, "<< BiCG Sub Kernel of BiCGStab Linear Solver >>\n"); init_array(A, p, r); t_start = rtclock(); bicg_OMP(A, r, s_GPU, p, q_GPU); t_end = rtclock(); fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start); #ifdef RUN_TEST t_start = rtclock(); bicg_cpu(A, r, s, p, q); t_end = rtclock(); fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start); fail = compareResults(s, s_GPU, q, q_GPU); #endif free(A); free(r); free(s); free(p); free(q); free(s_GPU); free(q_GPU); return fail; }
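/* Added note (not in the original file): mathematically the kernel computes the
 * two BiCG matrix-vector products
 *     s = A^T * r    and    q = A * p.
 * The CPU reference fuses both into a single i-loop, which is fine serially but
 * would race on s[j] if parallelized over i; the offloaded version therefore
 * splits the work into two independent loops, parallelizing the s update over
 * j (columns) and the q update over i (rows). */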
updater_basemaker-inl.h
/*! * Copyright 2014 by Contributors * \file updater_basemaker-inl.h * \brief implement a common tree constructor * \author Tianqi Chen */ #ifndef XGBOOST_TREE_UPDATER_BASEMAKER_INL_H_ #define XGBOOST_TREE_UPDATER_BASEMAKER_INL_H_ #include <xgboost/base.h> #include <xgboost/tree_updater.h> #include <vector> #include <algorithm> #include <string> #include <limits> #include <utility> #include "./param.h" #include "../common/sync.h" #include "../common/io.h" #include "../common/random.h" #include "../common/quantile.h" namespace xgboost { namespace tree { /*! * \brief base tree maker class that defines common operation * needed in tree making */ class BaseMaker: public TreeUpdater { public: void Init(const std::vector<std::pair<std::string, std::string> >& args) override { param_.InitAllowUnknown(args); } protected: // helper to collect and query feature meta information struct FMetaHelper { public: /*! \brief find type of each feature, use column format */ inline void InitByCol(DMatrix* p_fmat, const RegTree& tree) { fminmax_.resize(tree.param.num_feature * 2); std::fill(fminmax_.begin(), fminmax_.end(), -std::numeric_limits<bst_float>::max()); // start accumulating statistics auto iter = p_fmat->ColIterator(); iter->BeforeFirst(); while (iter->Next()) { auto batch = iter->Value(); for (bst_uint fid = 0; fid < batch.Size(); ++fid) { auto c = batch[fid]; if (c.length != 0) { fminmax_[fid * 2 + 0] = std::max(-c[0].fvalue, fminmax_[fid * 2 + 0]); fminmax_[fid * 2 + 1] = std::max(c[c.length - 1].fvalue, fminmax_[fid * 2 + 1]); } } } } /*! \brief synchronize the information */ inline void SyncInfo() { rabit::Allreduce<rabit::op::Max>(dmlc::BeginPtr(fminmax_), fminmax_.size()); } // get feature type, 0:empty 1:binary 2:real inline int Type(bst_uint fid) const { CHECK_LT(fid * 2 + 1, fminmax_.size()) << "FeatHelper fid exceed query bound "; bst_float a = fminmax_[fid * 2]; bst_float b = fminmax_[fid * 2 + 1]; if (a == -std::numeric_limits<bst_float>::max()) return 0; if (-a == b) { return 1; } else { return 2; } } inline bst_float MaxValue(bst_uint fid) const { return fminmax_[fid *2 + 1]; } inline void SampleCol(float p, std::vector<bst_uint> *p_findex) const { std::vector<bst_uint> &findex = *p_findex; findex.clear(); for (size_t i = 0; i < fminmax_.size(); i += 2) { const auto fid = static_cast<bst_uint>(i / 2); if (this->Type(fid) != 0) findex.push_back(fid); } auto n = static_cast<unsigned>(p * findex.size()); std::shuffle(findex.begin(), findex.end(), common::GlobalRandom()); findex.resize(n); // sync the findex if it is subsample std::string s_cache; common::MemoryBufferStream fc(&s_cache); dmlc::Stream& fs = fc; if (rabit::GetRank() == 0) { fs.Write(findex); } rabit::Broadcast(&s_cache, 0); fs.Read(&findex); } private: std::vector<bst_float> fminmax_; }; // ------static helper functions ------ // helper function to get to next level of the tree /*! \brief this is helper function for row based data*/ inline static int NextLevel(const SparsePage::Inst &inst, const RegTree &tree, int nid) { const RegTree::Node &n = tree[nid]; bst_uint findex = n.SplitIndex(); for (unsigned i = 0; i < inst.length; ++i) { if (findex == inst[i].index) { if (inst[i].fvalue < n.SplitCond()) { return n.LeftChild(); } else { return n.RightChild(); } } } return n.DefaultChild(); } // ------class member helpers--------- /*! 
\brief initialize temp data structure */ inline void InitData(const std::vector<GradientPair> &gpair, const DMatrix &fmat, const RegTree &tree) { CHECK_EQ(tree.param.num_nodes, tree.param.num_roots) << "TreeMaker: can only grow new tree"; const std::vector<unsigned> &root_index = fmat.Info().root_index_; { // setup position position_.resize(gpair.size()); if (root_index.size() == 0) { std::fill(position_.begin(), position_.end(), 0); } else { for (size_t i = 0; i < position_.size(); ++i) { position_[i] = root_index[i]; CHECK_LT(root_index[i], (unsigned)tree.param.num_roots) << "root index exceeds setting"; } } // mark deletion for the deleted data for (size_t i = 0; i < position_.size(); ++i) { if (gpair[i].GetHess() < 0.0f) position_[i] = ~position_[i]; } // mark subsample if (param_.subsample < 1.0f) { std::bernoulli_distribution coin_flip(param_.subsample); auto& rnd = common::GlobalRandom(); for (size_t i = 0; i < position_.size(); ++i) { if (gpair[i].GetHess() < 0.0f) continue; if (!coin_flip(rnd)) position_[i] = ~position_[i]; } } } { // expand query qexpand_.reserve(256); qexpand_.clear(); for (int i = 0; i < tree.param.num_roots; ++i) { qexpand_.push_back(i); } this->UpdateNode2WorkIndex(tree); } } /*! \brief update the expand queue, adding in new leaves */ inline void UpdateQueueExpand(const RegTree &tree) { std::vector<int> newnodes; for (int nid : qexpand_) { if (!tree[nid].IsLeaf()) { newnodes.push_back(tree[nid].LeftChild()); newnodes.push_back(tree[nid].RightChild()); } } // use new nodes for qexpand qexpand_ = newnodes; this->UpdateNode2WorkIndex(tree); } // return decoded position inline int DecodePosition(bst_uint ridx) const { const int pid = position_[ridx]; return pid < 0 ? ~pid : pid; } // set the position for ridx, preserving its encoded (deleted) state inline void SetEncodePosition(bst_uint ridx, int nid) { if (position_[ridx] < 0) { position_[ridx] = ~nid; } else { position_[ridx] = nid; } } /*! * \brief this is a helper function that uses the column-based data structure; * it resets the positions to the latest one * \param nodes the set of nodes that contains the split to be used * \param p_fmat feature matrix needed for tree construction * \param tree the regression tree structure */ inline void ResetPositionCol(const std::vector<int> &nodes, DMatrix *p_fmat, const RegTree &tree) { // set the positions in the nondefault this->SetNonDefaultPositionCol(nodes, p_fmat, tree); this->SetDefaultPostion(p_fmat, tree); } /*! * \brief helper function to set the non-leaf positions to the default direction. * This function can be applied multiple times and will get the same result.
* \param p_fmat feature matrix needed for tree construction * \param tree the regression tree structure */ inline void SetDefaultPostion(DMatrix *p_fmat, const RegTree &tree) { // set rest of instances to default position const RowSet &rowset = p_fmat->BufferedRowset(); // set default direct nodes to default // for leaf nodes that are not fresh, mark them as ~nid, // so that they are ignored in future statistics collection const auto ndata = static_cast<bst_omp_uint>(rowset.Size()); #pragma omp parallel for schedule(static) for (bst_omp_uint i = 0; i < ndata; ++i) { const bst_uint ridx = rowset[i]; const int nid = this->DecodePosition(ridx); if (tree[nid].IsLeaf()) { // mark finish when it is not a fresh leaf if (tree[nid].RightChild() == -1) { position_[ridx] = ~nid; } } else { // push to default branch if (tree[nid].DefaultLeft()) { this->SetEncodePosition(ridx, tree[nid].LeftChild()); } else { this->SetEncodePosition(ridx, tree[nid].RightChild()); } } } } /*! * \brief this is a helper function that uses the column-based data structure * to CORRECT the positions of non-default directions that WERE set to default * before calling this function. * \param batch The column batch * \param sorted_split_set The sorted set of indices that contains split solutions. * \param tree the regression tree structure */ inline void CorrectNonDefaultPositionByBatch( const SparsePage &batch, const std::vector<bst_uint> &sorted_split_set, const RegTree &tree) { for (size_t fid = 0; fid < batch.Size(); ++fid) { auto col = batch[fid]; auto it = std::lower_bound(sorted_split_set.begin(), sorted_split_set.end(), fid); if (it != sorted_split_set.end() && *it == fid) { const auto ndata = static_cast<bst_omp_uint>(col.length); #pragma omp parallel for schedule(static) for (bst_omp_uint j = 0; j < ndata; ++j) { const bst_uint ridx = col[j].index; const bst_float fvalue = col[j].fvalue; const int nid = this->DecodePosition(ridx); CHECK(tree[nid].IsLeaf()); int pid = tree[nid].Parent(); // go back to parent, correct those who are not default if (!tree[nid].IsRoot() && tree[pid].SplitIndex() == fid) { if (fvalue < tree[pid].SplitCond()) { this->SetEncodePosition(ridx, tree[pid].LeftChild()); } else { this->SetEncodePosition(ridx, tree[pid].RightChild()); } } } } } } /*! * \brief helper function that collects the set of split feature indices used by the given nodes * \param nodes the set of nodes that contains the split to be used * \param tree the regression tree structure * \param out_split_set The split index set */ inline void GetSplitSet(const std::vector<int> &nodes, const RegTree &tree, std::vector<unsigned>* out_split_set) { std::vector<unsigned>& fsplits = *out_split_set; fsplits.clear(); // collect the split feature indices used by the nodes for (int nid : nodes) { if (!tree[nid].IsLeaf()) { fsplits.push_back(tree[nid].SplitIndex()); } } std::sort(fsplits.begin(), fsplits.end()); fsplits.resize(std::unique(fsplits.begin(), fsplits.end()) - fsplits.begin()); } /*!
* \brief this is a helper function that uses the column-based data structure; * it updates all positions into the non-default branch, if any, ignoring the default branch * \param nodes the set of nodes that contains the split to be used * \param p_fmat feature matrix needed for tree construction * \param tree the regression tree structure */ virtual void SetNonDefaultPositionCol(const std::vector<int> &nodes, DMatrix *p_fmat, const RegTree &tree) { std::vector<unsigned> fsplits; this->GetSplitSet(nodes, tree, &fsplits); auto iter = p_fmat->ColIterator(); while (iter->Next()) { auto batch = iter->Value(); for (auto fid : fsplits) { auto col = batch[fid]; const auto ndata = static_cast<bst_omp_uint>(col.length); #pragma omp parallel for schedule(static) for (bst_omp_uint j = 0; j < ndata; ++j) { const bst_uint ridx = col[j].index; const bst_float fvalue = col[j].fvalue; const int nid = this->DecodePosition(ridx); // go back to parent, correct those who are not default if (!tree[nid].IsLeaf() && tree[nid].SplitIndex() == fid) { if (fvalue < tree[nid].SplitCond()) { this->SetEncodePosition(ridx, tree[nid].LeftChild()); } else { this->SetEncodePosition(ridx, tree[nid].RightChild()); } } } } } } /*! \brief helper function to get statistics from a tree */ template<typename TStats> inline void GetNodeStats(const std::vector<GradientPair> &gpair, const DMatrix &fmat, const RegTree &tree, std::vector< std::vector<TStats> > *p_thread_temp, std::vector<TStats> *p_node_stats) { std::vector< std::vector<TStats> > &thread_temp = *p_thread_temp; const MetaInfo &info = fmat.Info(); thread_temp.resize(omp_get_max_threads()); p_node_stats->resize(tree.param.num_nodes); #pragma omp parallel { const int tid = omp_get_thread_num(); thread_temp[tid].resize(tree.param.num_nodes, TStats(param_)); for (unsigned int nid : qexpand_) { thread_temp[tid][nid].Clear(); } } const RowSet &rowset = fmat.BufferedRowset(); // accumulate the per-thread statistics by node position const auto ndata = static_cast<bst_omp_uint>(rowset.Size()); #pragma omp parallel for schedule(static) for (bst_omp_uint i = 0; i < ndata; ++i) { const bst_uint ridx = rowset[i]; const int nid = position_[ridx]; const int tid = omp_get_thread_num(); if (nid >= 0) { thread_temp[tid][nid].Add(gpair, info, ridx); } } // sum the per thread statistics together for (int nid : qexpand_) { TStats &s = (*p_node_stats)[nid]; s.Clear(); for (size_t tid = 0; tid < thread_temp.size(); ++tid) { s.Add(thread_temp[tid][nid]); } } } /*! \brief common helper data structure to build sketch */ struct SketchEntry { /*! \brief total sum of amount to be met */ double sum_total; /*! \brief statistics used in the sketch */ double rmin, wmin; /*! \brief last seen feature value */ bst_float last_fvalue; /*! \brief target rank for the next sketch entry */ double next_goal; // pointer to the sketch to put things in common::WXQuantileSketch<bst_float, bst_float> *sketch; // initialize the space inline void Init(unsigned max_size) { next_goal = -1.0f; rmin = wmin = 0.0f; sketch->temp.Reserve(max_size + 1); sketch->temp.size = 0; } /*!
* \brief push a new element to sketch * \param fvalue feature value, comes in sorted ascending order * \param w weight * \param max_size */ inline void Push(bst_float fvalue, bst_float w, unsigned max_size) { if (next_goal == -1.0f) { next_goal = 0.0f; last_fvalue = fvalue; wmin = w; return; } if (last_fvalue != fvalue) { double rmax = rmin + wmin; if (rmax >= next_goal && sketch->temp.size != max_size) { if (sketch->temp.size == 0 || last_fvalue > sketch->temp.data[sketch->temp.size-1].value) { // push to sketch sketch->temp.data[sketch->temp.size] = common::WXQuantileSketch<bst_float, bst_float>:: Entry(static_cast<bst_float>(rmin), static_cast<bst_float>(rmax), static_cast<bst_float>(wmin), last_fvalue); CHECK_LT(sketch->temp.size, max_size) << "invalid maximum size max_size=" << max_size << ", stemp.size=" << sketch->temp.size; ++sketch->temp.size; } if (sketch->temp.size == max_size) { next_goal = sum_total * 2.0f + 1e-5f; } else { next_goal = static_cast<bst_float>(sketch->temp.size * sum_total / max_size); } } else { if (rmax >= next_goal) { LOG(TRACKER) << "INFO: rmax=" << rmax << ", sum_total=" << sum_total << ", next_goal=" << next_goal << ", size=" << sketch->temp.size; } } rmin = rmax; wmin = w; last_fvalue = fvalue; } else { wmin += w; } } /*! \brief push the final unfinished value to the sketch */ inline void Finalize(unsigned max_size) { double rmax = rmin + wmin; if (sketch->temp.size == 0 || last_fvalue > sketch->temp.data[sketch->temp.size-1].value) { CHECK_LE(sketch->temp.size, max_size) << "Finalize: invalid maximum size, max_size=" << max_size << ", stemp.size=" << sketch->temp.size; // push to sketch sketch->temp.data[sketch->temp.size] = common::WXQuantileSketch<bst_float, bst_float>:: Entry(static_cast<bst_float>(rmin), static_cast<bst_float>(rmax), static_cast<bst_float>(wmin), last_fvalue); ++sketch->temp.size; } sketch->PushTemp(); } }; /*! \brief training parameter of tree grower */ TrainParam param_; /*! \brief queue of nodes to be expanded */ std::vector<int> qexpand_; /*! * \brief map active node to its working index offset in qexpand; * can be -1, which means the node is not actively expanding */ std::vector<int> node2workindex_; /*! * \brief position of each instance in the tree * can be negative, which means this position is no longer expanding * see also Decode/EncodePosition */ std::vector<int> position_; private: inline void UpdateNode2WorkIndex(const RegTree &tree) { // update the node2workindex std::fill(node2workindex_.begin(), node2workindex_.end(), -1); node2workindex_.resize(tree.param.num_nodes); for (size_t i = 0; i < qexpand_.size(); ++i) { node2workindex_[qexpand_[i]] = static_cast<int>(i); } } }; } // namespace tree } // namespace xgboost #endif // XGBOOST_TREE_UPDATER_BASEMAKER_INL_H_
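// Added worked example (not part of the original header): position_ uses the
// bitwise complement to flag inactive rows while keeping the node id
// recoverable, which is exactly what Decode/SetEncodePosition implement above:
//
//   position_[ridx] = 5;         // row actively sits in node 5
//   position_[ridx] = ~5;        // == -6: same node, but excluded from stats
//   DecodePosition(ridx);        // returns 5 in both cases
//   SetEncodePosition(ridx, 8);  // moves the row to node 8, keeping the flag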
teams_notarget.c
#include <stdlib.h> #include <stdio.h> #define N 10000 int main() { int n = N; int *a = (int *)malloc(n*sizeof(int)); /* OpenMP 5.0 allows a teams construct on the host, outside of any target region; the loop iterations are distributed across the initial threads of the league */ #pragma omp teams distribute for(int i = 0; i < n; i++) a[i] = i; int err = 0; for(int i = 0; i < n; i++) if (a[i] != i) { printf("Error at %d: a = %d, should be %d\n", i, a[i], i); err++; if (err > 10) break; } free(a); return err; }
LockFreePoolWithList.h
#pragma once #include <omp.h> #include <cstdlib> #include <iostream> #include <vector> #include "common.h" #include <cassert> #include <cstring> #include <sys/mman.h> #define MAP_HUGE_2MB (21 << MAP_HUGE_SHIFT) template <u64 MAX_THREADS = 16, u64 MAX_SEGMENT_BITS = 32, u64 BLOCK_SIZE = (1UL << 22)> class LockFreePoolWithList { alignas(64) void* __restrict nextFreePtrs[MAX_THREADS][MAX_SEGMENT_BITS]; public: #ifdef CALC_MEM_PER_EDGE u64 totMem = 0; #endif LockFreePoolWithList(){ memset(nextFreePtrs, 0, MAX_THREADS * MAX_SEGMENT_BITS * sizeof(void*)); } void* allocLog2(u64 log2size){ #ifdef _OPENMP void* __restrict * nextPtr = &nextFreePtrs[omp_get_thread_num()][log2size]; #else void* __restrict * nextPtr = &nextFreePtrs[0][log2size]; #endif if(__builtin_expect(*nextPtr == nullptr, 0)){ const u64 segSize = 1UL << log2size; const u64 blockSize = (segSize <= BLOCK_SIZE) ? BLOCK_SIZE : segSize; #ifdef USE_HUGEPAGE void* blockStart = mmap(NULL, blockSize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB | MAP_HUGE_2MB, -1, 0); #else void* blockStart = aligned_alloc(64, blockSize); #endif #ifdef CALC_MEM_PER_EDGE #pragma omp atomic totMem += blockSize; #endif *nextPtr = blockStart; const u64 iter = blockSize >> log2size; for(u64 i = 0; i < (iter - 1); i++){ void* nextBlock = (u8*)blockStart + segSize; *(void**)blockStart = nextBlock; blockStart = nextBlock; } *(void**)blockStart = nullptr; } void* ret = *nextPtr; *nextPtr = *(void**)ret; return ret; } void* allocPow2(u64 size){ return allocLog2(getPow2Log2(size)); } void* allocate(u64 size){ return allocLog2(getNextPow2Log2(size)); } void freeLog2(void* ptr, u64 log2size){ #ifdef _OPENMP void* __restrict * nextPtr = &nextFreePtrs[omp_get_thread_num()][log2size]; #else void* __restrict * nextPtr = &nextFreePtrs[0][log2size]; #endif *(void**)ptr = *nextPtr; *nextPtr = ptr; } void freePow2(void* __restrict ptr, u64 size){ freeLog2(ptr, getPow2Log2(size)); } void deallocate(void* __restrict ptr, u64 size){ freeLog2(ptr, getNextPow2Log2(size)); } };
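// Added usage sketch (hypothetical driver, not part of the header): each thread
// pops from its own per-(thread, size-class) free list, so the hot path needs
// no atomics; on a miss, a BLOCK_SIZE slab is carved into equal power-of-two
// segments threaded into that list.  Slabs are never returned to the OS.
//
//   LockFreePoolWithList<> pool;
//   #pragma omp parallel
//   {
//       void* p = pool.allocate(100);  // rounds up to the 128-byte class,
//                                      // assuming getNextPow2Log2 (common.h)
//                                      // is a ceil-log2 helper
//       /* ... use p ... */
//       pool.deallocate(p, 100);       // the same size must be passed back
//   }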
SinoConePad.c
#include <stdio.h> #include <math.h> #include <stdlib.h> #include <memory.h> #include "omp.h" /* * C-function to inpaint missing data wedges in cone-beam sinogram (currently accepts 2D slices only) * * Inputs: * 1. 2D sinogram (A) where x-axis are detectors and y-axis are angles * 2. sinoMultip: a multiplier for the sinogram threshold in the range (0,1]; default: 0.3 * 3. gradMultip: a multiplier for the gradient threshold in the range (0,1]; default: 0.05 * 4. pad_lookup: an integer number of pixels to add to calculate a median of usable values; default: 10 * 5. sino_roll: an integer number to roll the sinogram from the top to bottom symmetrically; default: 200 * * Outputs: * 1. Inpainted sinogram (B) * 2. dy^2 (squared gradient in Y direction) (U) * * to compile with OMP support: mex SinoConePad.c CFLAGS="\$CFLAGS -fopenmp -Wall -std=c99" LDFLAGS="\$LDFLAGS -fopenmp" * gcc -shared -Wl,-soname,SinoPad -fopenmp -o SinoPad.so -fPIC SinoConePad.c * * Harwell/18.04.17 */ float pad_func_joint(float *A, float *B, float *gradYs, int dimX, int dimY, float edgeval, float gradval, int pad_lookup, int mid_val); float gradYsq_func(float *U, float *gradYs, int dimX, int dimY); float copyAr(float *B, float *U, int dimX, int dimY); float copyAr_roll(float *B, float *U, int dimX, int dimY, int roll_value, int switcher); float cleaning_outliers_func(float *A, float *B, int dimX, int dimY, float edgeval); /*TODO: reading parameters from Python, would be great to add some fool-proof system */ /*TODO: Passing printf messages to the user from python? */ /*Handling Python input data*/ /* A: sinogram [detectors, angles] */ /* sinoMultip: a multiplier for the sinogram in the range (0,1]; */ /* gradMultip: a multiplier for the gradient in the range (0,1]; */ /* pad_lookup: number of pixels to add to calculate a median of usable values */ /* roll_value: symmetric sinogram roll from the top to bottom, 0 - do not roll */ void SinoPad(float *A, float sinoMultip, float gradMultip, int pad_lookup, int roll_value, int dimX, int dimY, float *B, float *gradYs) { int i, j, iter, counterElements, iterations_number, mid_val; float *U, edgeval, gradval, sumW; mid_val = (int)(0.5f*pad_lookup); /*TODO: some general checks are required as were in the MEX file below! */ /* if (mxGetClassID(prhs[0]) != mxSINGLE_CLASS) {mexErrMsgTxt("The input in a single precision is required"); } /*if(nrhs != 5) mexErrMsgTxt("Five inputs are required "); if ((pad_lookup <= 3) || (gradMultip > 200)) {mexErrMsgTxt("Too small or too large value for padding, try 10 "); } if ((roll_value < 0)) {mexErrMsgTxt(" Symmetric sinogram rolling value must be positive "); } if (number_of_dims == 3) { mexErrMsgTxt("Currently only 2D version is available"); } */ if ((sinoMultip <= 0) || (sinoMultip > 1)) { fprintf(stderr, "The sinogram multiplier must be in the range (0,1], e.g. try: 0.3 \n"); exit(EXIT_FAILURE); } if ((gradMultip <= 0) || (gradMultip > 1)) { fprintf(stderr, "The gradient multiplier must be in the range (0,1], e.g.
try 0.05 \n"); exit(EXIT_FAILURE); } U = (float*) calloc (dimY*dimX,sizeof(float)); /*gradYs = (float*)mxGetPr(plhs[1] = mxCreateNumericArray(2, dim_array, mxSINGLE_CLASS, mxREAL));*/ /* find a critical edge-value of usable sinogram (tweakable by changing sinoMultip) */ edgeval = 0.0f; for(i=0; i<dimY*dimX; i++) if (A[i] > edgeval) edgeval = A[i]; edgeval = edgeval*sinoMultip; /* max value in sinogram multiplied by sinoMultip*/ if (roll_value > 0) copyAr_roll(A, U, dimX, dimY, roll_value, 0); /* copying A to U with symmetric rolling */ else copyAr(A, U, dimX, dimY); /* just copying A to U*/ /* find a gradient edge-value */ gradYsq_func(U, gradYs, dimX, dimY); gradval = 0.0f; for(i=0; i<dimY*dimX; i++) if (gradYs[i] > gradval) gradval = gradYs[i]; gradval = gradval*gradMultip; /* max value in dy^2 gradient multiplied by gradMultip */ printf("%s %f \n", "Threshold for the sinogram is:", edgeval); printf("%s %f \n", "Threshold for the gradient is:", gradval); /* prepare weighting vectors */ /* InvEucDist = (float*) calloc(pad_lookup,sizeof(float)); */ /* inverted Euclidean distance (similarity) */ /* SumInvEucDist = (float*) calloc(pad_lookup,sizeof(float)); */ /* precalculated normalization factors based on InvEucDist */ /*sumW = 0.0f; for(i=1; i<pad_lookup; i++) { InvEucDist[i] = 1.0f/sqrt(i); sumW += InvEucDist[i]; SumInvEucDist[i] = sumW; } */ /*Iterations run here*/ iterations_number = (int)(0.25f*dimY); /*MAX iteration value cannot be larger than dimY */ /*iterations_number = 70;*/ for(iter=0; iter < iterations_number; iter++) { /* calculate the gradient along Y axis to establish edges */ if (iter > 0) {gradYsq_func(U, gradYs, dimX, dimY);} /* do padding by considering all neighbours */ pad_func_joint(U, B, gradYs, dimX, dimY, edgeval, gradval, pad_lookup, mid_val); /* copy B to U*/ copyAr(B, U, dimX, dimY); /* clean some outliers */ cleaning_outliers_func(U, B, dimX, dimY, edgeval); /* copy B to U*/ copyAr(B, U, dimX, dimY); /* check if need to terminate iterations earlier */ counterElements = 0; for(i=0; i<dimX; i++) { for(j=0; j<dimY; j++) { if (B[i*dimY + j] > edgeval) counterElements++; }} if (counterElements == dimX*dimY) { printf("%s \n", "Padding completed!"); break; } } /* do unrolling or just copying */ if (roll_value > 0) copyAr_roll(U, B, dimX, dimY, roll_value, 1); else copyAr(U, B, dimX, dimY); printf("%s %i \n", "Iterations stopped at:", iter); free(U); } float gradYsq_func(float *U, float *gradYs, int dimX, int dimY) { int i,j,j1,j0; #pragma omp parallel for shared(U, gradYs) private(i, j, j0, j1) for (i=0; i<dimX; i++) { for (j=0; j<dimY; j++) { j0 = j-1; if (j0 < 0) j0 = j+1; j1 = j+1; if (j1 == dimY) j1 = j-1; gradYs[i*dimY + j] = pow((0.5f*(U[i*dimY + j1] - U[i*dimY + j0])),2); }} return *gradYs; } float pad_func_joint(float *A, float *B, float *gradYs, int dimX, int dimY, float edgeval, float gradval, int pad_lookup, int mid_val) { int i, j, l,counter,counter1,counter2,counter3,counter4,counter5,counter6,counter7,counter8,sumcounter; float val1,val2,val3,val4,val5,val6,val7,val8,temp; #pragma omp parallel for shared(A, B, gradYs) private(i, j, l, sumcounter, counter, val1,val2,val3,val4,val5,val6,val7,val8,counter1,counter2,counter3,counter4,counter5,counter6,counter7,counter8,temp) for(i=0; i<dimX; i++) { for(j=0; j<dimY; j++) { /* starting a pass in a column-by-column fashion, find one pixel at the edge at a time */ if (gradYs[i*dimY + j] >= gradval) { /* standing on the edge pixel, we need to check if data lie in the vicinity of the chosen neighbourhood of the size equal
to pad_lookup */ counter1 = 0; sumcounter = 0; for(l=1; l<pad_lookup; l++) { if (((j + l) < dimY) && (A[i*dimY + (j + l)] >= edgeval)) { counter1++; sumcounter += counter1; }} counter = 0; val1 = 0.0f; for(l=1; l<pad_lookup; l++) { if (((j + l) < dimY) && (A[i*dimY + (j + l)] >= edgeval)) { temp = (float)(counter1 - counter)/(float)(sumcounter); val1 += temp * A[i*dimY + (j + l)]; counter++; }} /*printf("%i %f \n", counter1, val1);*/ counter2 = 0; sumcounter = 0; for(l=1; l<pad_lookup; l++) { if (((j - l) >= 0) && (A[i*dimY + (j - l)] >= edgeval)) { counter2++; sumcounter += counter2; }} counter = 0; val2 = 0.0f; for(l=1; l<pad_lookup; l++) { if (((j - l) >= 0) && (A[i*dimY + (j - l)] >= edgeval)) { temp = (float)(counter2 - counter)/(float)(sumcounter); val2 += temp * A[i*dimY + (j - l)]; counter++; }} /*printf("%i %f \n", counter2, val2);*/ counter3 = 0; sumcounter = 0; for(l=1; l<pad_lookup; l++) { if (((i - l) >= 0) && (A[(i-l)*dimY + (j)] >= edgeval)) { counter3++; sumcounter += counter3; }} counter = 0; val3 = 0.0f; for(l=1; l<pad_lookup; l++) { if (((i - l) >= 0) && (A[(i-l)*dimY + (j)] >= edgeval)) { temp = (float)(counter3 - counter)/(float)(sumcounter); val3 += temp * A[(i-l)*dimY + (j)]; counter++; }} /*printf("%i %f \n", counter3, val3);*/ counter4 = 0; sumcounter = 0; for(l=1; l<pad_lookup; l++) { if (((i + l) < dimX) && (A[(i+l)*dimY + (j)] >= edgeval)) { counter4++; sumcounter += counter4; }} counter = 0; val4 = 0.0f; for(l=1; l<pad_lookup; l++) { if (((i + l) < dimX) && (A[(i+l)*dimY + (j)] >= edgeval)) { temp = (float)(counter4 - counter)/(float)(sumcounter); val4 += temp * A[(i+l)*dimY + (j)]; counter++; }} /*printf("%i %f \n", counter4, val4);*/ /* diagonal values */ counter5 = 0; sumcounter = 0; for(l=1; l<pad_lookup; l++) { if (((j + l) < dimY) && ((i + l) < dimX) && (A[(i + l)*dimY + (j + l)] >= edgeval)) { counter5++; sumcounter += counter5; }} counter = 0; val5 = 0.0f; for(l=1; l<pad_lookup; l++) { if (((j + l) < dimY) && ((i + l) < dimX) && (A[(i + l)*dimY + (j + l)] >= edgeval)) { temp = (float)(counter5 - counter)/(float)(sumcounter); val5 += temp * A[(i + l)*dimY + (j + l)]; counter++; }} /*printf("%i %f \n", counter5, val5);*/ counter6 = 0; sumcounter = 0; for(l=1; l<pad_lookup; l++) { if (((j + l) < dimY) && ((i - l) >= 0) && (A[(i - l)*dimY + (j + l)] >= edgeval)) { counter6++; sumcounter += counter6; }} counter = 0; val6 = 0.0f; for(l=1; l<pad_lookup; l++) { if (((j + l) < dimY) && ((i - l) >= 0) && (A[(i - l)*dimY + (j + l)] >= edgeval)) { temp = (float)(counter6 - counter)/(float)(sumcounter); val6 += temp * A[(i - l)*dimY + (j + l)]; counter++; }} /*printf("%i %f \n", counter6, val6);*/ counter7 = 0; sumcounter = 0; for(l=1; l<pad_lookup; l++) { if (((j - l) >= 0) && ((i - l) >= 0) && (A[(i - l)*dimY + (j - l)] >= edgeval)) { counter7++; sumcounter += counter7; }} counter = 0; val7 = 0.0f; for(l=1; l<pad_lookup; l++) { if (((j - l) >= 0) && ((i - l) >= 0) && (A[(i - l)*dimY + (j - l)] >= edgeval)) { temp = (float)(counter7 - counter)/(float)(sumcounter); val7 += temp * A[(i - l)*dimY + (j - l)]; counter++; }} /*printf("%i %f \n", counter7, val7);*/ counter8 = 0; sumcounter = 0; for(l=1; l<pad_lookup; l++) { if (((j - l) >= 0) && ((i + l) < dimX) && (A[(i + l)*dimY + (j - l)] >= edgeval)) { counter8++; sumcounter += counter8; }} counter = 0; val8 = 0.0f; for(l=1; l<pad_lookup; l++) { if (((j - l) >= 0) && ((i + l) < dimX) && (A[(i + l)*dimY + (j - l)] >= edgeval)) { temp = (float)(counter8 - counter)/(float)(sumcounter); val8 += temp * A[(i + l)*dimY + (j 
- l)]; counter++; }} /*printf("%i %f \n", counter8, val8);*/ /* total contribution of neighbors in the current position */ sumcounter = counter1 + counter2 + counter3 + counter4 + counter5 + counter6 + counter7 + counter8; /* resulting value */ if (sumcounter != 0) { temp = (float)(counter1)/(float)(sumcounter); B[i*dimY + j] = temp*val1; temp = (float)(counter2)/(float)(sumcounter); B[i*dimY + j] += temp*val2; temp = (float)(counter3)/(float)(sumcounter); B[i*dimY + j] += temp*val3; temp = (float)(counter4)/(float)(sumcounter); B[i*dimY + j] += temp*val4; temp = (float)(counter5)/(float)(sumcounter); B[i*dimY + j] += temp*val5; temp = (float)(counter6)/(float)(sumcounter); B[i*dimY + j] += temp*val6; temp = (float)(counter7)/(float)(sumcounter); B[i*dimY + j] += temp*val7; temp = (float)(counter8)/(float)(sumcounter); B[i*dimY + j] += temp*val8; } else B[i*dimY + j] = A[i*dimY + j]; } /* if (gradYs[i*dimY + j] >= gradval) */ else B[i*dimY + j] = A[i*dimY + j]; } } return *B; } float cleaning_outliers_func(float *A, float *B, int dimX, int dimY, float edgeval) { int i, j; #pragma omp parallel for shared(A, B) private(i, j) for(i=0; i<dimX; i++) { for(j=0; j<dimY; j++) { if ((j == 0) && (A[i*dimY + (j)] < edgeval)) { /* check the neighbour below */ if (A[i*dimY + (j+1)] > edgeval) B[i*dimY + (j)] = A[i*dimY + (j+1)]; } else if ((j == (dimY-1)) && (A[i*dimY + (dimY-1)] < edgeval)) { /* check the neighbour above */ if (A[i*dimY + (dimY-2)] > edgeval) B[i*dimY + (j)] = A[i*dimY + (dimY-2)]; } else { /* check the neighbours above and below */ if (A[i*dimY + (j)] < edgeval) { if ((A[i*dimY + (j-1)] > edgeval) && (A[i*dimY + (j+1)] > edgeval)) { B[i*dimY + (j)] = 0.5f*(A[i*dimY + (j-1)] + A[i*dimY + (j+1)]); } } } }} return *B; } float copyAr(float *B, float *U, int dimX, int dimY) { int i; #pragma omp parallel for shared(U, B) private(i) for (i=0; i<dimY*dimX; i++) { U[i] = B[i]; } return *U; } float copyAr_roll(float *B, float *U, int dimX, int dimY, int roll_value, int switcher) { int i, j; #pragma omp parallel for shared(U, B) private(i,j) for (i=0; i<dimX; i++) { for (j=0; j<dimY; j++) { if (switcher == 0) { if (j < (dimY - roll_value)) U[i*dimY + j] = B[i*dimY + (j+roll_value)]; else U[i*dimY + j] = B[i*dimY + (j - (dimY - roll_value))]; } else { if (j < roll_value) U[i*dimY + j] = B[i*dimY + (j+(dimY - roll_value))]; else U[i*dimY + j] = B[i*dimY + (j - roll_value)]; } }} return *U; }
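/* Added usage sketch (hypothetical caller, not part of the file): B and gradYs
 * must be pre-allocated to dimX*dimY floats by the caller; the parameter values
 * below are the defaults suggested in the header comment.
 *
 *   int dimX = 512, dimY = 360;                            // detectors x angles
 *   float *A  = calloc((size_t)dimX*dimY, sizeof(float));  // sinogram with gaps
 *   float *B  = calloc((size_t)dimX*dimY, sizeof(float));
 *   float *gY = calloc((size_t)dimX*dimY, sizeof(float));
 *   SinoPad(A, 0.3f, 0.05f, 10, 200, dimX, dimY, B, gY);
 */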
fci_4pdm.c
/* Copyright 2014-2018 The PySCF Developers. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. * * Author: Qiming Sun <osirpt.sun@gmail.com> */ #include <stdlib.h> #include <string.h> #include <assert.h> //#include <omp.h> #include "config.h" #include "vhf/fblas.h" #include "fci.h" #define MIN(X,Y) ((X)<(Y)?(X):(Y)) #define BLK 48 #define BUFBASE 96 double FCI_t1ci_sf(double *ci0, double *t1, int bcount, int stra_id, int strb_id, int norb, int na, int nb, int nlinka, int nlinkb, _LinkT *clink_indexa, _LinkT *clink_indexb); /* * t2[:,i,j,k,l] = E^i_j E^k_l|ci0> */ static void rdm4_0b_t2(double *ci0, double *t2, int bcount, int stra_id, int strb_id, int norb, int na, int nb, int nlinka, int nlinkb, _LinkT *clink_indexa, _LinkT *clink_indexb) { const int nnorb = norb * norb; const int n4 = nnorb * nnorb; int i, j, k, l, a, sign, str1; double *t1 = malloc(sizeof(double) * nb * nnorb); double *pt1, *pt2; _LinkT *tab; // form t1 which has beta^+ beta |t1> => target stra_id FCI_t1ci_sf(ci0, t1, nb, stra_id, 0, norb, na, nb, nlinka, nlinkb, clink_indexa, clink_indexb); #pragma omp parallel private(i, j, k, l, a, str1, sign, pt1, pt2, tab) { #pragma omp for schedule(static, 1) nowait for (k = 0; k < bcount; k++) { memset(t2+k*n4, 0, sizeof(double)*n4); tab = clink_indexb + (strb_id+k) * nlinkb; for (j = 0; j < nlinkb; j++) { a = EXTRACT_CRE (tab[j]); i = EXTRACT_DES (tab[j]); str1 = EXTRACT_ADDR(tab[j]); sign = EXTRACT_SIGN(tab[j]); pt1 = t1 + str1 * nnorb; pt2 = t2 + k * n4 + (i*norb+a)*nnorb; if (sign > 0) { for (l = 0; l < nnorb; l++) { pt2[l] += pt1[l]; } } else { for (l = 0; l < nnorb; l++) { pt2[l] -= pt1[l]; } } } } } free(t1); } /* * t2[:,i,j,k,l] = E^i_j E^k_l|ci0> */ static void rdm4_a_t2(double *ci0, double *t2, int bcount, int stra_id, int strb_id, int norb, int na, int nb, int nlinka, int nlinkb, _LinkT *clink_indexa, _LinkT *clink_indexb) { const int nnorb = norb * norb; const int n4 = nnorb * nnorb; int i, j, k, l, a, sign, str1; double *pt1, *pt2; _LinkT *tab = clink_indexa + stra_id * nlinka; #pragma omp parallel private(i, j, k, l, a, str1, sign, pt1, pt2) { double *t1 = malloc(sizeof(double) * bcount * nnorb); #pragma omp for schedule(static, 40) for (j = 0; j < nlinka; j++) { a = EXTRACT_CRE (tab[j]); i = EXTRACT_DES (tab[j]); str1 = EXTRACT_ADDR(tab[j]); sign = EXTRACT_SIGN(tab[j]); // form t1 which has alpha^+ alpha |t1> => target stra_id (through str1) FCI_t1ci_sf(ci0, t1, bcount, str1, strb_id, norb, na, nb, nlinka, nlinkb, clink_indexa, clink_indexb); for (k = 0; k < bcount; k++) { pt1 = t1 + k * nnorb; pt2 = t2 + k * n4 + (i*norb+a)*nnorb; if (sign > 0) { for (l = 0; l < nnorb; l++) { pt2[l] += pt1[l]; } } else { for (l = 0; l < nnorb; l++) { pt2[l] -= pt1[l]; } } } } free(t1); } } void FCI_t2ci_sf(double *ci0, double *t2, int bcount, int stra_id, int strb_id, int norb, int na, int nb, int nlinka, int nlinkb, _LinkT *clink_indexa, _LinkT *clink_indexb) { rdm4_0b_t2(ci0, t2, bcount, stra_id, strb_id, norb, na, nb, nlinka, nlinkb, clink_indexa, clink_indexb); rdm4_a_t2 (ci0, t2, bcount, stra_id, 
strb_id, norb, na, nb, nlinka, nlinkb, clink_indexa, clink_indexb); } static void tril3pdm_particle_symm(double *rdm3, double *tbra, double *t2ket, int bcount, int ncre, int norb) { assert(norb <= BLK); const char TRANS_N = 'N'; const char TRANS_T = 'T'; const double D1 = 1; int nnorb = norb * norb; int n4 = nnorb * nnorb; int i, j, k, m, n, blk1; int iblk = MIN(BLK/norb, norb); int blk = iblk * norb; //dgemm_(&TRANS_N, &TRANS_T, &n4, &nncre, &bcount, // &D1, t2ket, &n4, tbra, &nnorb, &D1, rdm3, &n4); // "upper triangle" F-array[k,j,i], k<=i<=j for (j = 0; j < ncre; j++) { for (n = 0; n < norb; n++) { for (k = 0; k < j+1-iblk; k+=iblk) { m = k * norb; i = m + blk; dgemm_(&TRANS_N, &TRANS_T, &i, &blk, &bcount, &D1, t2ket, &n4, tbra+m, &nnorb, &D1, rdm3+m*n4, &n4); } m = k * norb; i = (j+1) * norb; blk1 = i - m; dgemm_(&TRANS_N, &TRANS_T, &i, &blk1, &bcount, &D1, t2ket, &n4, tbra+m, &nnorb, &D1, rdm3+m*n4, &n4); t2ket += nnorb; rdm3 += nnorb; } } } static void tril2pdm_particle_symm(double *rdm2, double *tbra, double *tket, int bcount, int ncre, int norb) { assert(norb <= BLK); const char TRANS_N = 'N'; const char TRANS_T = 'T'; const double D1 = 1; int nnorb = norb * norb; int nncre = norb * ncre; int m, n; int blk = MIN(BLK/norb, norb) * norb; //dgemm_(&TRANS_N, &TRANS_T, &nncre, &nncre, &bcount, // &D1, tket, &nnorb, tbra, &nnorb, &D1, rdm2, &nnorb); // upper triangle part of F-array for (m = 0; m < nncre-blk; m+=blk) { n = m + blk; dgemm_(&TRANS_N, &TRANS_T, &n, &blk, &bcount, &D1, tket, &nnorb, tbra+m, &nnorb, &D1, rdm2+m*nnorb, &nnorb); } n = nncre - m; dgemm_(&TRANS_N, &TRANS_T, &nncre, &n, &bcount, &D1, tket, &nnorb, tbra+m, &nnorb, &D1, rdm2+m*nnorb, &nnorb); } static void make_rdm12_sf(double *rdm1, double *rdm2, double *bra, double *ket, double *t1bra, double *t1ket, int bcount, int stra_id, int strb_id, int norb, int na, int nb) { const char TRANS_N = 'N'; const char TRANS_T = 'T'; const int INC1 = 1; const double D1 = 1; const int nnorb = norb * norb; int k, l; size_t n; double *tbra = malloc(sizeof(double) * nnorb * bcount); double *pbra, *pt1; for (n = 0; n < bcount; n++) { pbra = tbra + n * nnorb; pt1 = t1bra + n * nnorb; for (k = 0; k < norb; k++) { for (l = 0; l < norb; l++) { pbra[k*norb+l] = pt1[l*norb+k]; } } } dgemm_(&TRANS_N, &TRANS_T, &nnorb, &nnorb, &bcount, &D1, t1ket, &nnorb, tbra, &nnorb, &D1, rdm2, &nnorb); dgemv_(&TRANS_N, &nnorb, &bcount, &D1, t1ket, &nnorb, bra+stra_id*nb+strb_id, &INC1, &D1, rdm1, &INC1); free(tbra); } static void make_rdm12_spin0(double *rdm1, double *rdm2, double *bra, double *ket, double *t1bra, double *t1ket, int bcount, int stra_id, int strb_id, int norb, int na, int nb) { const char TRANS_N = 'N'; const char TRANS_T = 'T'; const int INC1 = 1; const double D1 = 1; const int nnorb = norb * norb; int k, l; size_t n; double *tbra = malloc(sizeof(double) * nnorb * bcount); double *pbra, *pt1; double factor; for (n = 0; n < bcount; n++) { if (n+strb_id == stra_id) { factor = 1; } else { factor = 2; } pbra = tbra + n * nnorb; pt1 = t1bra + n * nnorb; for (k = 0; k < norb; k++) { for (l = 0; l < norb; l++) { pbra[k*norb+l] = pt1[l*norb+k] * factor; } } } dgemm_(&TRANS_N, &TRANS_T, &nnorb, &nnorb, &bcount, &D1, t1ket, &nnorb, tbra, &nnorb, &D1, rdm2, &nnorb); dgemv_(&TRANS_N, &nnorb, &bcount, &D1, tbra, &nnorb, bra+stra_id*na+strb_id, &INC1, &D1, rdm1, &INC1); free(tbra); } void FCI4pdm_kern_sf(double *rdm1, double *rdm2, double *rdm3, double *rdm4, double *bra, double *ket, int bcount, int stra_id, int strb_id, int norb, int na, int nb, int 
nlinka, int nlinkb, _LinkT *clink_indexa, _LinkT *clink_indexb) { const int nnorb = norb * norb; const int n4 = nnorb * nnorb; const int n3 = nnorb * norb; const size_t n6 = nnorb * nnorb * nnorb; int i, j, k, l, ij; size_t n; double *tbra; double *t1bra = malloc(sizeof(double) * nnorb * bcount * 2); double *t2bra = malloc(sizeof(double) * n4 * bcount * 2); double *t1ket = t1bra + nnorb * bcount; double *t2ket = t2bra + n4 * bcount; double *pbra, *pt2; // t2[:,i,j,k,l] = E^i_j E^k_l|ket> FCI_t1ci_sf(bra, t1bra, bcount, stra_id, strb_id, norb, na, nb, nlinka, nlinkb, clink_indexa, clink_indexb); FCI_t2ci_sf(bra, t2bra, bcount, stra_id, strb_id, norb, na, nb, nlinka, nlinkb, clink_indexa, clink_indexb); if (bra == ket) { t1ket = t1bra; t2ket = t2bra; } else { FCI_t1ci_sf(ket, t1ket, bcount, stra_id, strb_id, norb, na, nb, nlinka, nlinkb, clink_indexa, clink_indexb); FCI_t2ci_sf(ket, t2ket, bcount, stra_id, strb_id, norb, na, nb, nlinka, nlinkb, clink_indexa, clink_indexb); } #pragma omp parallel private(ij, i, j, k, l, n, tbra, pbra, pt2) { tbra = malloc(sizeof(double) * nnorb * bcount); #pragma omp for schedule(static, 1) nowait for (ij = 0; ij < nnorb; ij++) { // loop ij for (<ket| E^j_i E^l_k) for (n = 0; n < bcount; n++) { for (k = 0; k < norb; k++) { pbra = tbra + n * nnorb + k*norb; pt2 = t2bra + n * n4 + k*nnorb + ij; for (l = 0; l < norb; l++) { pbra[l] = pt2[l*n3]; } } } i = ij / norb; j = ij - i * norb; // contract <bra-of-Eij| with |E^k_l E^m_n ket> tril3pdm_particle_symm(rdm4+(j*norb+i)*n6, tbra, t2ket, bcount, j+1, norb); // rdm3 tril2pdm_particle_symm(rdm3+(j*norb+i)*n4, tbra, t1ket, bcount, j+1, norb); } free(tbra); } make_rdm12_sf(rdm1, rdm2, bra, ket, t1bra, t1ket, bcount, stra_id, strb_id, norb, na, nb); free(t1bra); free(t2bra); } /* * use symmetry ci0[a,b] == ci0[b,a], t2[a,b,...] == t2[b,a,...] 
*/ void FCI4pdm_kern_spin0(double *rdm1, double *rdm2, double *rdm3, double *rdm4, double *bra, double *ket, int bcount, int stra_id, int strb_id, int norb, int na, int nb, int nlinka, int nlinkb, _LinkT *clink_indexa, _LinkT *clink_indexb) { int fill1; if (strb_id+bcount <= stra_id) { fill1 = bcount; } else if (stra_id >= strb_id) { fill1 = stra_id - strb_id + 1; } else { return; } const int nnorb = norb * norb; const int n4 = nnorb * nnorb; const int n3 = nnorb * norb; const size_t n6 = nnorb * nnorb * nnorb; int i, j, k, l, ij; size_t n; double factor; double *tbra; double *t1bra = malloc(sizeof(double) * nnorb * fill1 * 2); double *t2bra = malloc(sizeof(double) * n4 * fill1 * 2); double *t1ket = t1bra + nnorb * fill1; double *t2ket = t2bra + n4 * fill1; double *pbra, *pt2; FCI_t1ci_sf(bra, t1bra, fill1, stra_id, strb_id, norb, na, nb, nlinka, nlinkb, clink_indexa, clink_indexb); FCI_t2ci_sf(bra, t2bra, fill1, stra_id, strb_id, norb, na, nb, nlinka, nlinkb, clink_indexa, clink_indexb); if (bra == ket) { t1ket = t1bra; t2ket = t2bra; } else { FCI_t1ci_sf(ket, t1ket, fill1, stra_id, strb_id, norb, na, nb, nlinka, nlinkb, clink_indexa, clink_indexb); FCI_t2ci_sf(ket, t2ket, fill1, stra_id, strb_id, norb, na, nb, nlinka, nlinkb, clink_indexa, clink_indexb); } #pragma omp parallel private(ij, i, j, k, l, n, tbra, pbra, pt2, factor) { tbra = malloc(sizeof(double) * nnorb * fill1); #pragma omp for schedule(dynamic, 4) for (ij = 0; ij < nnorb; ij++) { // loop ij for (<ket| E^j_i E^l_k) i = ij / norb; j = ij - i * norb; for (n = 0; n < fill1; n++) { if (n+strb_id == stra_id) { factor = 1; } else { factor = 2; } for (k = 0; k <= j; k++) { pbra = tbra + n * nnorb + k*norb; pt2 = t2bra + n * n4 + k*nnorb + ij; for (l = 0; l < norb; l++) { pbra[l] = pt2[l*n3] * factor; } } } // contract <bra-of-Eij| with |E^k_l E^m_n ket> tril3pdm_particle_symm(rdm4+(j*norb+i)*n6, tbra, t2ket, fill1, j+1, norb); // rdm3 tril2pdm_particle_symm(rdm3+(j*norb+i)*n4, tbra, t1ket, fill1, j+1, norb); } free(tbra); } make_rdm12_spin0(rdm1, rdm2, bra, ket, t1bra, t1ket, fill1, stra_id, strb_id, norb, na, nb); free(t1bra); free(t2bra); } /* * This function returns incomplete rdm3, rdm4, in which, particle * permutation symmetry is assumed. 
* kernel can be FCI4pdm_kern_sf, FCI4pdm_kern_spin0 */ void FCIrdm4_drv(void (*kernel)(), double *rdm1, double *rdm2, double *rdm3, double *rdm4, double *bra, double *ket, int norb, int na, int nb, int nlinka, int nlinkb, int *link_indexa, int *link_indexb) { const size_t nnorb = norb * norb; const size_t n4 = nnorb * nnorb; int ib, strk, bcount; _LinkT *clinka = malloc(sizeof(_LinkT) * nlinka * na); _LinkT *clinkb = malloc(sizeof(_LinkT) * nlinkb * nb); FCIcompress_link(clinka, link_indexa, norb, na, nlinka); FCIcompress_link(clinkb, link_indexb, norb, nb, nlinkb); memset(rdm1, 0, sizeof(double) * nnorb); memset(rdm2, 0, sizeof(double) * n4); memset(rdm3, 0, sizeof(double) * n4 * nnorb); memset(rdm4, 0, sizeof(double) * n4 * n4); for (strk = 0; strk < na; strk++) { for (ib = 0; ib < nb; ib += BUFBASE) { bcount = MIN(BUFBASE, nb-ib); (*kernel)(rdm1, rdm2, rdm3, rdm4, bra, ket, bcount, strk, ib, norb, na, nb, nlinka, nlinkb, clinka, clinkb); } } free(clinka); free(clinkb); } void FCI3pdm_kern_sf(double *rdm1, double *rdm2, double *rdm3, double *bra, double *ket, int bcount, int stra_id, int strb_id, int norb, int na, int nb, int nlinka, int nlinkb, _LinkT *clink_indexa, _LinkT *clink_indexb) { const int nnorb = norb * norb; const int n4 = nnorb * nnorb; const int n3 = nnorb * norb; int i, j, k, l, ij; size_t n; double *tbra; double *t1bra = malloc(sizeof(double) * nnorb * bcount); double *t1ket = malloc(sizeof(double) * nnorb * bcount); double *t2bra = malloc(sizeof(double) * n4 * bcount); double *pbra, *pt2; // t2[:,i,j,k,l] = E^i_j E^k_l|ket> FCI_t1ci_sf(bra, t1bra, bcount, stra_id, strb_id, norb, na, nb, nlinka, nlinkb, clink_indexa, clink_indexb); FCI_t2ci_sf(bra, t2bra, bcount, stra_id, strb_id, norb, na, nb, nlinka, nlinkb, clink_indexa, clink_indexb); FCI_t1ci_sf(ket, t1ket, bcount, stra_id, strb_id, norb, na, nb, nlinka, nlinkb, clink_indexa, clink_indexb); #pragma omp parallel private(ij, i, j, k, l, n, tbra, pbra, pt2) { tbra = malloc(sizeof(double) * nnorb * bcount); #pragma omp for schedule(dynamic, 4) for (ij = 0; ij < nnorb; ij++) { // loop ij for (<ket| E^j_i E^l_k) for (n = 0; n < bcount; n++) { pbra = tbra + n * nnorb; pt2 = t2bra + n * n4 + ij; for (k = 0; k < norb; k++) { for (l = 0; l < norb; l++) { pbra[k*norb+l] = pt2[l*n3+k*nnorb]; } } } i = ij / norb; j = ij - i * norb; tril2pdm_particle_symm(rdm3+(j*norb+i)*n4, tbra, t1ket, bcount, j+1, norb); } free(tbra); } make_rdm12_sf(rdm1, rdm2, bra, ket, t1bra, t1ket, bcount, stra_id, strb_id, norb, na, nb); free(t1bra); free(t1ket); free(t2bra); } /* * use symmetry ci0[a,b] == ci0[b,a], t2[a,b,...] == t2[b,a,...] 
*/ void FCI3pdm_kern_spin0(double *rdm1, double *rdm2, double *rdm3, double *bra, double *ket, int bcount, int stra_id, int strb_id, int norb, int na, int nb, int nlinka, int nlinkb, _LinkT *clink_indexa, _LinkT *clink_indexb) { int fill1; if (strb_id+bcount <= stra_id) { fill1 = bcount; } else if (stra_id >= strb_id) { fill1 = stra_id - strb_id + 1; } else { return; } const int nnorb = norb * norb; const int n4 = nnorb * nnorb; const int n3 = nnorb * norb; int i, j, k, l, ij; size_t n; double factor; double *tbra; double *t1bra = malloc(sizeof(double) * nnorb * fill1); double *t1ket = malloc(sizeof(double) * nnorb * fill1); double *t2bra = malloc(sizeof(double) * n4 * fill1); double *pbra, *pt2; // t2[:,i,j,k,l] = E^i_j E^k_l|ket> FCI_t2ci_sf(bra, t2bra, fill1, stra_id, strb_id, norb, na, nb, nlinka, nlinkb, clink_indexa, clink_indexb); FCI_t1ci_sf(bra, t1bra, fill1, stra_id, strb_id, norb, na, nb, nlinka, nlinkb, clink_indexa, clink_indexb); FCI_t1ci_sf(ket, t1ket, fill1, stra_id, strb_id, norb, na, nb, nlinka, nlinkb, clink_indexa, clink_indexb); #pragma omp parallel private(ij, i, j, k, l, n, tbra, pbra, pt2, factor) { tbra = malloc(sizeof(double) * nnorb * fill1); #pragma omp for schedule(dynamic, 4) for (ij = 0; ij < nnorb; ij++) { // loop ij for (<ket| E^j_i E^l_k) i = ij / norb; j = ij - i * norb; for (n = 0; n < fill1; n++) { if (n+strb_id == stra_id) { factor = 1; } else { factor = 2; } for (k = 0; k <= j; k++) { pbra = tbra + n * nnorb + k*norb; pt2 = t2bra + n * n4 + k*nnorb + ij; for (l = 0; l < norb; l++) { pbra[l] = pt2[l*n3] * factor; } } } tril2pdm_particle_symm(rdm3+(j*norb+i)*n4, tbra, t1ket, fill1, j+1, norb); } free(tbra); } make_rdm12_spin0(rdm1, rdm2, bra, ket, t1bra, t1ket, fill1, stra_id, strb_id, norb, na, nb); free(t1bra); free(t1ket); free(t2bra); } /* * This function returns incomplete rdm3, in which, particle * permutation symmetry is assumed. * kernel can be FCI3pdm_kern_ms0, FCI3pdm_kern_spin0 */ void FCIrdm3_drv(void (*kernel)(), double *rdm1, double *rdm2, double *rdm3, double *bra, double *ket, int norb, int na, int nb, int nlinka, int nlinkb, int *link_indexa, int *link_indexb) { const size_t nnorb = norb * norb; const size_t n4 = nnorb * nnorb; int ib, strk, bcount; _LinkT *clinka = malloc(sizeof(_LinkT) * nlinka * na); _LinkT *clinkb = malloc(sizeof(_LinkT) * nlinkb * nb); FCIcompress_link(clinka, link_indexa, norb, na, nlinka); FCIcompress_link(clinkb, link_indexb, norb, nb, nlinkb); memset(rdm1, 0, sizeof(double) * nnorb); memset(rdm2, 0, sizeof(double) * n4); memset(rdm3, 0, sizeof(double) * n4 * nnorb); for (strk = 0; strk < na; strk++) { for (ib = 0; ib < nb; ib += BUFBASE) { bcount = MIN(BUFBASE, nb-ib); (*kernel)(rdm1, rdm2, rdm3, bra, ket, bcount, strk, ib, norb, na, nb, nlinka, nlinkb, clinka, clinkb); } } free(clinka); free(clinkb); }
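/* Added note (not in the original file): the drivers accumulate expectation
 * values of products of the spin-free excitation operators E^i_j (index
 * conventions per the t2 comments above), blocked over beta strings in chunks
 * of BUFBASE = 96 so each DGEMM works on a bcount-wide panel:
 *     rdm1[i,j]     = <bra| E^i_j |ket>
 *     rdm2[i,j,k,l] = <bra| E^i_j E^k_l |ket>
 * and analogously for rdm3/rdm4 from the t1/t2 intermediates.  Only the
 * k <= i <= j "triangle" of rdm3/rdm4 is filled (tril*pdm_particle_symm), so
 * the caller is expected to restore full particle-permutation symmetry, as the
 * "incomplete rdm3, rdm4" comments above note. */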
Sema.h
//===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file defines the Sema class, which performs semantic analysis and // builds ASTs. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_SEMA_SEMA_H #define LLVM_CLANG_SEMA_SEMA_H #include "clang/AST/ASTConcept.h" #include "clang/AST/Attr.h" #include "clang/AST/Availability.h" #include "clang/AST/ComparisonCategories.h" #include "clang/AST/DeclTemplate.h" #include "clang/AST/DeclarationName.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/ExprObjC.h" #include "clang/AST/ExternalASTSource.h" #include "clang/AST/LocInfoType.h" #include "clang/AST/MangleNumberingContext.h" #include "clang/AST/NSAPI.h" #include "clang/AST/PrettyPrinter.h" #include "clang/AST/StmtCXX.h" #include "clang/AST/TypeLoc.h" #include "clang/APINotes/APINotesManager.h" #include "clang/AST/TypeOrdering.h" #include "clang/Basic/ExpressionTraits.h" #include "clang/Basic/Module.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/PragmaKinds.h" #include "clang/Basic/Specifiers.h" #include "clang/Basic/TemplateKinds.h" #include "clang/Basic/TypeTraits.h" #include "clang/Sema/AnalysisBasedWarnings.h" #include "clang/Sema/CleanupInfo.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/ExternalSemaSource.h" #include "clang/Sema/IdentifierResolver.h" #include "clang/Sema/ObjCMethodList.h" #include "clang/Sema/Ownership.h" #include "clang/Sema/Scope.h" #include "clang/Sema/TypoCorrection.h" #include "clang/Sema/Weak.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/Optional.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallBitVector.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/TinyPtrVector.h" #include <deque> #include <functional> #include <memory> #include <string> #include <tuple> #include <vector> namespace llvm { class APSInt; template <typename ValueT> struct DenseMapInfo; template <typename ValueT, typename ValueInfoT> class DenseSet; class SmallBitVector; struct InlineAsmIdentifierInfo; } namespace clang { class ADLResult; class ASTConsumer; class ASTContext; class ASTMutationListener; class ASTReader; class ASTWriter; class ArrayType; class ParsedAttr; class BindingDecl; class BlockDecl; class CapturedDecl; class CXXBasePath; class CXXBasePaths; class CXXBindTemporaryExpr; typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath; class CXXConstructorDecl; class CXXConversionDecl; class CXXDeleteExpr; class CXXDestructorDecl; class CXXFieldCollector; class CXXMemberCallExpr; class CXXMethodDecl; class CXXScopeSpec; class CXXTemporary; class CXXTryStmt; class CallExpr; class ClassTemplateDecl; class ClassTemplatePartialSpecializationDecl; class ClassTemplateSpecializationDecl; class VarTemplatePartialSpecializationDecl; class CodeCompleteConsumer; class CodeCompletionAllocator; class CodeCompletionTUInfo; class CodeCompletionResult; class CoroutineBodyStmt; class Decl; class DeclAccessPair; class DeclContext; class DeclRefExpr; class DeclaratorDecl; class DeducedTemplateArgument; class DependentDiagnostic; class DesignatedInitExpr; class Designation; class EnableIfAttr; class EnumConstantDecl; class 
Expr; class ExtVectorType; class FormatAttr; class FriendDecl; class FunctionDecl; class FunctionProtoType; class FunctionTemplateDecl; class ImplicitConversionSequence; typedef MutableArrayRef<ImplicitConversionSequence> ConversionSequenceList; class InitListExpr; class InitializationKind; class InitializationSequence; class InitializedEntity; class IntegerLiteral; class LabelStmt; class LambdaExpr; class LangOptions; class LocalInstantiationScope; class LookupResult; class MacroInfo; typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath; class ModuleLoader; class MultiLevelTemplateArgumentList; class NamedDecl; class ObjCCategoryDecl; class ObjCCategoryImplDecl; class ObjCCompatibleAliasDecl; class ObjCContainerDecl; class ObjCImplDecl; class ObjCImplementationDecl; class ObjCInterfaceDecl; class ObjCIvarDecl; template <class T> class ObjCList; class ObjCMessageExpr; class ObjCMethodDecl; class ObjCPropertyDecl; class ObjCProtocolDecl; class OMPThreadPrivateDecl; class OMPRequiresDecl; class OMPDeclareReductionDecl; class OMPDeclareSimdDecl; class OMPClause; struct OMPVarListLocTy; struct OverloadCandidate; enum class OverloadCandidateParamOrder : char; enum OverloadCandidateRewriteKind : unsigned; class OverloadCandidateSet; class OverloadExpr; class ParenListExpr; class ParmVarDecl; class Preprocessor; class PseudoDestructorTypeStorage; class PseudoObjectExpr; class QualType; class StandardConversionSequence; class Stmt; class StringLiteral; class SwitchStmt; class TemplateArgument; class TemplateArgumentList; class TemplateArgumentLoc; class TemplateDecl; class TemplateInstantiationCallback; class TemplateParameterList; class TemplatePartialOrderingContext; class TemplateTemplateParmDecl; class Token; class TypeAliasDecl; class TypedefDecl; class TypedefNameDecl; class TypeLoc; class TypoCorrectionConsumer; class UnqualifiedId; class UnresolvedLookupExpr; class UnresolvedMemberExpr; class UnresolvedSetImpl; class UnresolvedSetIterator; class UsingDecl; class UsingShadowDecl; class ValueDecl; class VarDecl; class VarTemplateSpecializationDecl; class VisibilityAttr; class VisibleDeclConsumer; class IndirectFieldDecl; struct DeductionFailureInfo; class TemplateSpecCandidateSet; namespace sema { class AccessedEntity; class BlockScopeInfo; class Capture; class CapturedRegionScopeInfo; class CapturingScopeInfo; class CompoundScopeInfo; class DelayedDiagnostic; class DelayedDiagnosticPool; class FunctionScopeInfo; class LambdaScopeInfo; class PossiblyUnreachableDiag; class SemaPPCallbacks; class TemplateDeductionInfo; } namespace threadSafety { class BeforeSet; void threadSafetyCleanup(BeforeSet* Cache); } // FIXME: No way to easily map from TemplateTypeParmTypes to // TemplateTypeParmDecls, so we have this horrible PointerUnion. typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>, SourceLocation> UnexpandedParameterPack; /// Describes whether we've seen any nullability information for the given /// file. struct FileNullability { /// The first pointer declarator (of any pointer kind) in the file that does /// not have a corresponding nullability annotation. SourceLocation PointerLoc; /// The end location for the first pointer declarator in the file. Used for /// placing fix-its. SourceLocation PointerEndLoc; /// Which kind of pointer declarator we saw. uint8_t PointerKind; /// Whether we saw any type nullability annotations in the given file. 
  bool SawTypeNullability = false;
};

/// A mapping from file IDs to a record of whether we've seen nullability
/// information in that file.
class FileNullabilityMap {
  /// A mapping from file IDs to the nullability information for each file ID.
  llvm::DenseMap<FileID, FileNullability> Map;

  /// A single-element cache based on the file ID.
  struct {
    FileID File;
    FileNullability Nullability;
  } Cache;

public:
  FileNullability &operator[](FileID file) {
    // Check the single-element cache.
    if (file == Cache.File)
      return Cache.Nullability;

    // It's not in the single-element cache; flush the cache if we have one.
    if (!Cache.File.isInvalid()) {
      Map[Cache.File] = Cache.Nullability;
    }

    // Pull this entry into the cache.
    Cache.File = file;
    Cache.Nullability = Map[file];
    return Cache.Nullability;
  }
};

/// Keeps track of the expected type during expression parsing. The type is
/// tied to a particular token; all functions that update or consume the type
/// take the start location of the token they are looking at as a parameter.
/// This allows us to avoid updating the type on hot paths in the parser.
class PreferredTypeBuilder {
public:
  PreferredTypeBuilder() = default;
  explicit PreferredTypeBuilder(QualType Type) : Type(Type) {}

  void enterCondition(Sema &S, SourceLocation Tok);
  void enterReturn(Sema &S, SourceLocation Tok);
  void enterVariableInit(SourceLocation Tok, Decl *D);
  /// Computing a type for the function argument may require running
  /// overloading, so we postpone its computation until it is actually needed.
  ///
  /// Clients should be very careful when using this function, as it stores a
  /// function_ref; clients should make sure all calls to get() with the same
  /// location happen while the function_ref is alive.
  void enterFunctionArgument(SourceLocation Tok,
                             llvm::function_ref<QualType()> ComputeType);

  void enterParenExpr(SourceLocation Tok, SourceLocation LParLoc);
  void enterUnary(Sema &S, SourceLocation Tok, tok::TokenKind OpKind,
                  SourceLocation OpLoc);
  void enterBinary(Sema &S, SourceLocation Tok, Expr *LHS, tok::TokenKind Op);
  void enterMemAccess(Sema &S, SourceLocation Tok, Expr *Base);
  void enterSubscript(Sema &S, SourceLocation Tok, Expr *LHS);
  /// Handles all type casts, including C-style cast, C++ casts, etc.
  void enterTypeCast(SourceLocation Tok, QualType CastType);

  QualType get(SourceLocation Tok) const {
    if (Tok != ExpectedLoc)
      return QualType();
    if (!Type.isNull())
      return Type;
    if (ComputeType)
      return ComputeType();
    return QualType();
  }

private:
  /// Start position of a token for which we store the expected type.
  SourceLocation ExpectedLoc;
  /// Expected type for a token starting at ExpectedLoc.
  QualType Type;
  /// A function to compute the expected type at ExpectedLoc. It is only
  /// considered if Type is null.
  llvm::function_ref<QualType()> ComputeType;
};

/// Sema - This implements semantic analysis and AST building for C.
class Sema final {
  Sema(const Sema &) = delete;
  void operator=(const Sema &) = delete;

  /// A key method to reduce duplicate debug info from Sema.
  virtual void anchor();

  /// Source of additional semantic information.
  ExternalSemaSource *ExternalSource;

  /// Whether Sema has generated a multiplexer and has to delete it.
  bool isMultiplexExternalSource;

  static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD);

  bool isVisibleSlow(const NamedDecl *D);

  /// Determine whether two declarations should be linked together, given that
  /// the old declaration might not be visible and the new declaration might
  /// not have external linkage.
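  // [Illustrative sketch -- not part of Clang.] FileNullabilityMap above keeps
  // a one-element cache in front of a DenseMap so repeated lookups for the
  // same FileID stay cheap. A minimal standalone version of the same pattern,
  // using std::unordered_map and hypothetical Key/Value parameters:
#if 0
#include <optional>
#include <unordered_map>

template <typename Key, typename Value>
class CachedMap {
  std::unordered_map<Key, Value> Map;
  std::optional<Key> CachedKey; // empty until the first lookup
  Value CachedValue;

public:
  Value &operator[](const Key &K) {
    // Fast path: the key from the previous lookup is asked for again.
    if (CachedKey && *CachedKey == K)
      return CachedValue;
    // Write the previous cache entry back before replacing it.
    if (CachedKey)
      Map[*CachedKey] = CachedValue;
    CachedKey = K;
    CachedValue = Map[K]; // default-constructs on first access, like DenseMap
    return CachedValue;
  }
};
#endif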
bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old, const NamedDecl *New) { if (isVisible(Old)) return true; // See comment in below overload for why it's safe to compute the linkage // of the new declaration here. if (New->isExternallyDeclarable()) { assert(Old->isExternallyDeclarable() && "should not have found a non-externally-declarable previous decl"); return true; } return false; } bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New); void setupImplicitSpecialMemberType(CXXMethodDecl *SpecialMem, QualType ResultTy, ArrayRef<QualType> Args); public: typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy; typedef OpaquePtr<TemplateName> TemplateTy; typedef OpaquePtr<QualType> TypeTy; OpenCLOptions OpenCLFeatures; FPOptions FPFeatures; const LangOptions &LangOpts; Preprocessor &PP; ASTContext &Context; ASTConsumer &Consumer; DiagnosticsEngine &Diags; SourceManager &SourceMgr; api_notes::APINotesManager APINotes; /// Flag indicating whether or not to collect detailed statistics. bool CollectStats; /// Code-completion consumer. CodeCompleteConsumer *CodeCompleter; /// CurContext - This is the current declaration context of parsing. DeclContext *CurContext; /// Generally null except when we temporarily switch decl contexts, /// like in \see ActOnObjCTemporaryExitContainerContext. DeclContext *OriginalLexicalContext; /// VAListTagName - The declaration name corresponding to __va_list_tag. /// This is used as part of a hack to omit that class from ADL results. DeclarationName VAListTagName; bool MSStructPragmaOn; // True when \#pragma ms_struct on /// Controls member pointer representation format under the MS ABI. LangOptions::PragmaMSPointersToMembersKind MSPointerToMemberRepresentationMethod; /// Stack of active SEH __finally scopes. Can be empty. SmallVector<Scope*, 2> CurrentSEHFinally; /// Source location for newly created implicit MSInheritanceAttrs SourceLocation ImplicitMSInheritanceAttrLoc; /// Holds TypoExprs that are created from `createDelayedTypo`. This is used by /// `TransformTypos` in order to keep track of any TypoExprs that are created /// recursively during typo correction and wipe them away if the correction /// fails. llvm::SmallVector<TypoExpr *, 2> TypoExprs; /// pragma clang section kind enum PragmaClangSectionKind { PCSK_Invalid = 0, PCSK_BSS = 1, PCSK_Data = 2, PCSK_Rodata = 3, PCSK_Text = 4, PCSK_Relro = 5 }; enum PragmaClangSectionAction { PCSA_Set = 0, PCSA_Clear = 1 }; struct PragmaClangSection { std::string SectionName; bool Valid = false; SourceLocation PragmaLocation; void Act(SourceLocation PragmaLocation, PragmaClangSectionAction Action, StringLiteral* Name); }; PragmaClangSection PragmaClangBSSSection; PragmaClangSection PragmaClangDataSection; PragmaClangSection PragmaClangRodataSection; PragmaClangSection PragmaClangRelroSection; PragmaClangSection PragmaClangTextSection; enum PragmaMsStackAction { PSK_Reset = 0x0, // #pragma () PSK_Set = 0x1, // #pragma (value) PSK_Push = 0x2, // #pragma (push[, id]) PSK_Pop = 0x4, // #pragma (pop[, id]) PSK_Show = 0x8, // #pragma (show) -- only for "pack"! 
    PSK_Push_Set = PSK_Push | PSK_Set, // #pragma (push[, id], value)
    PSK_Pop_Set  = PSK_Pop | PSK_Set,  // #pragma (pop[, id], value)
  };

  template<typename ValueType>
  struct PragmaStack {
    struct Slot {
      llvm::StringRef StackSlotLabel;
      ValueType Value;
      SourceLocation PragmaLocation;
      SourceLocation PragmaPushLocation;
      Slot(llvm::StringRef StackSlotLabel, ValueType Value,
           SourceLocation PragmaLocation, SourceLocation PragmaPushLocation)
          : StackSlotLabel(StackSlotLabel), Value(Value),
            PragmaLocation(PragmaLocation),
            PragmaPushLocation(PragmaPushLocation) {}
    };
    void Act(SourceLocation PragmaLocation,
             PragmaMsStackAction Action,
             llvm::StringRef StackSlotLabel,
             ValueType Value);

    // MSVC seems to add artificial slots to #pragma stacks on entering a C++
    // method body to restore the stacks on exit, so it works like this:
    //
    //   struct S {
    //     #pragma <name>(push, InternalPragmaSlot, <current_pragma_value>)
    //     void Method() {}
    //     #pragma <name>(pop, InternalPragmaSlot)
    //   };
    //
    // It works even with #pragma vtordisp, although MSVC doesn't support
    //   #pragma vtordisp(push [, id], n)
    // syntax.
    //
    // Push / pop a named sentinel slot.
    void SentinelAction(PragmaMsStackAction Action, StringRef Label) {
      assert((Action == PSK_Push || Action == PSK_Pop) &&
             "Can only push / pop #pragma stack sentinels!");
      Act(CurrentPragmaLocation, Action, Label, CurrentValue);
    }

    // Constructors.
    explicit PragmaStack(const ValueType &Default)
        : DefaultValue(Default), CurrentValue(Default) {}

    bool hasValue() const { return CurrentValue != DefaultValue; }

    SmallVector<Slot, 2> Stack;
    ValueType DefaultValue; // Value used for PSK_Reset action.
    ValueType CurrentValue;
    SourceLocation CurrentPragmaLocation;
  };
  // FIXME: We should serialize / deserialize these if they occur in a PCH (but
  // we shouldn't do so if they're in a module).

  /// Whether to insert vtordisps prior to virtual bases in the Microsoft
  /// C++ ABI. Possible values are 0, 1, and 2, which mean:
  ///
  /// 0: Suppress all vtordisps
  /// 1: Insert vtordisps in the presence of vbase overrides and non-trivial
  ///    structors
  /// 2: Always insert vtordisps to support RTTI on partially constructed
  ///    objects
  PragmaStack<MSVtorDispMode> VtorDispStack;
  // #pragma pack.
  // Sentinel to represent when the stack is set to mac68k alignment.
  static const unsigned kMac68kAlignmentSentinel = ~0U;
  PragmaStack<unsigned> PackStack;
  // The current #pragma pack values and locations at each #include.
  struct PackIncludeState {
    unsigned CurrentValue;
    SourceLocation CurrentPragmaLocation;
    bool HasNonDefaultValue, ShouldWarnOnInclude;
  };
  SmallVector<PackIncludeState, 8> PackIncludeStack;
  // Segment #pragmas.
  PragmaStack<StringLiteral *> DataSegStack;
  PragmaStack<StringLiteral *> BSSSegStack;
  PragmaStack<StringLiteral *> ConstSegStack;
  PragmaStack<StringLiteral *> CodeSegStack;

  // RAII object to push / pop sentinel slots for all MS #pragma stacks.
  // Actions should be performed only if we enter / exit a C++ method body.
  class PragmaStackSentinelRAII {
  public:
    PragmaStackSentinelRAII(Sema &S, StringRef SlotLabel, bool ShouldAct);
    ~PragmaStackSentinelRAII();

  private:
    Sema &S;
    StringRef SlotLabel;
    bool ShouldAct;
  };

  /// A mapping that describes the nullability we've seen in each header file.
  FileNullabilityMap NullabilityMap;

  /// Last section used with #pragma init_seg.
  StringLiteral *CurInitSeg;
  SourceLocation CurInitSegLoc;

  /// VisContext - Manages the stack for \#pragma GCC visibility.
  void *VisContext; // Really a "PragmaVisStack*"
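  // [Illustrative sketch -- not part of Clang.] The PragmaStack template above
  // models MSVC-style `#pragma X(push/pop/set)` state: a current value plus a
  // stack of labeled saved slots. A trimmed standalone version with the same
  // set/reset/push/pop actions (locations omitted; the Action enum and the
  // ValueStack name are hypothetical):
#if 0
#include <string>
#include <utility>
#include <vector>

template <typename ValueType>
struct ValueStack {
  enum Action { Set, Reset, Push, Pop };

  ValueType DefaultValue;
  ValueType CurrentValue;
  std::vector<std::pair<std::string, ValueType>> Stack; // label -> saved value

  explicit ValueStack(ValueType Default)
      : DefaultValue(Default), CurrentValue(Default) {}

  void act(Action A, const std::string &Label, ValueType V) {
    switch (A) {
    case Set:   CurrentValue = V; break;            // #pragma x(value)
    case Reset: CurrentValue = DefaultValue; break; // #pragma x()
    case Push:  // #pragma x(push[, label], value)
      Stack.push_back({Label, CurrentValue});
      CurrentValue = V;
      break;
    case Pop:   // #pragma x(pop[, label]): unwind to a matching label, if any
      while (!Stack.empty()) {
        auto Slot = Stack.back();
        Stack.pop_back();
        if (Label.empty() || Slot.first == Label) {
          CurrentValue = Slot.second;
          break;
        }
      }
      break;
    }
  }
};
#endif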
  /// This is an attribute introduced by \#pragma clang attribute.
  struct PragmaAttributeEntry {
    SourceLocation Loc;
    ParsedAttr *Attribute;
    SmallVector<attr::SubjectMatchRule, 4> MatchRules;
    bool IsUsed;
  };

  /// A push'd group of PragmaAttributeEntries.
  struct PragmaAttributeGroup {
    /// The location of the push attribute.
    SourceLocation Loc;
    /// The namespace of this push group.
    const IdentifierInfo *Namespace;
    SmallVector<PragmaAttributeEntry, 2> Entries;
  };

  SmallVector<PragmaAttributeGroup, 2> PragmaAttributeStack;

  /// The declaration that is currently receiving an attribute from the
  /// #pragma attribute stack.
  const Decl *PragmaAttributeCurrentTargetDecl;

  /// This represents the last location of a "#pragma clang optimize off"
  /// directive if such a directive has not been closed by an "on" yet. If
  /// optimizations are currently "on", this is set to an invalid location.
  SourceLocation OptimizeOffPragmaLocation;

  /// Flag indicating if Sema is building a recovery call expression.
  ///
  /// This flag is used to avoid building recovery call expressions
  /// if Sema is already doing so, which would cause infinite recursions.
  bool IsBuildingRecoveryCallExpr;

  /// Used to control the generation of ExprWithCleanups.
  CleanupInfo Cleanup;

  /// ExprCleanupObjects - This is the stack of objects requiring
  /// cleanup that are created by the current full expression. The
  /// element type here is ExprWithCleanups::Object.
  SmallVector<BlockDecl*, 8> ExprCleanupObjects;

  /// Store a set of either DeclRefExprs or MemberExprs that contain a
  /// reference to a variable (constant) that may or may not be odr-used in
  /// this Expr, and we won't know until all lvalue-to-rvalue and discarded
  /// value conversions have been applied to all subexpressions of the
  /// enclosing full expression. This is cleared at the end of each full
  /// expression.
  using MaybeODRUseExprSet = llvm::SmallPtrSet<Expr *, 2>;
  MaybeODRUseExprSet MaybeODRUseExprs;

  std::unique_ptr<sema::FunctionScopeInfo> CachedFunctionScope;

  /// Stack containing information about each of the nested
  /// function, block, and method scopes that are currently active.
  SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes;

  typedef LazyVector<TypedefNameDecl *, ExternalSemaSource,
                     &ExternalSemaSource::ReadExtVectorDecls, 2, 2>
    ExtVectorDeclsType;

  /// ExtVectorDecls - This is a list of all the extended vector types. This
  /// allows us to associate a raw vector type with one of the ext_vector type
  /// names. This is only necessary for issuing pretty diagnostics.
  ExtVectorDeclsType ExtVectorDecls;

  /// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes.
  std::unique_ptr<CXXFieldCollector> FieldCollector;

  typedef llvm::SmallSetVector<NamedDecl *, 16> NamedDeclSetType;

  /// Set containing all declared private fields that are not used.
  NamedDeclSetType UnusedPrivateFields;

  /// Set containing all typedefs that are likely unused.
  llvm::SmallSetVector<const TypedefNameDecl *, 4>
      UnusedLocalTypedefNameCandidates;

  /// Delete-expressions to be analyzed at the end of the translation unit.
  ///
  /// This list contains class members, and locations of delete-expressions
  /// that could not be proven as to whether they mismatch with the
  /// new-expression used in the initializer of the field.
  typedef std::pair<SourceLocation, bool> DeleteExprLoc;
  typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs;
  llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs;

  typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy;

  /// PureVirtualClassDiagSet - a set of class declarations for which we have
  /// emitted a list of pure virtual functions. Used to prevent emitting the
  /// same list more than once.
  std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet;

  /// ParsingInitForAutoVars - a set of declarations with auto types for which
  /// we are currently parsing the initializer.
  llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars;

  /// Look for a locally scoped extern "C" declaration by the given name.
  NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name);

  typedef LazyVector<VarDecl *, ExternalSemaSource,
                     &ExternalSemaSource::ReadTentativeDefinitions, 2, 2>
    TentativeDefinitionsType;

  /// All the tentative definitions encountered in the TU.
  TentativeDefinitionsType TentativeDefinitions;

  /// All the external declarations encountered and used in the TU.
  SmallVector<VarDecl *, 4> ExternalDeclarations;

  typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource,
                     &ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2>
    UnusedFileScopedDeclsType;

  /// The set of file scoped decls seen so far that have not been used
  /// and must warn if not used. Only contains the first declaration.
  UnusedFileScopedDeclsType UnusedFileScopedDecls;

  typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource,
                     &ExternalSemaSource::ReadDelegatingConstructors, 2, 2>
    DelegatingCtorDeclsType;

  /// All the delegating constructors seen so far in the file, used for
  /// cycle detection at the end of the TU.
  DelegatingCtorDeclsType DelegatingCtorDecls;

  /// All the overriding functions seen during a class definition
  /// that had their exception spec checks delayed, plus the overridden
  /// function.
  SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2>
    DelayedOverridingExceptionSpecChecks;

  /// All the function redeclarations seen during a class definition that had
  /// their exception spec checks delayed, plus the prior declaration they
  /// should be checked against. Except during error recovery, the new decl
  /// should always be a friend declaration, as that's the only valid way to
  /// redeclare a special member before its class is complete.
  SmallVector<std::pair<FunctionDecl*, FunctionDecl*>, 2>
    DelayedEquivalentExceptionSpecChecks;

  typedef llvm::MapVector<const FunctionDecl *,
                          std::unique_ptr<LateParsedTemplate>>
      LateParsedTemplateMapT;
  LateParsedTemplateMapT LateParsedTemplateMap;

  /// Callback to the parser to parse templated functions when needed.
  typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT);
  typedef void LateTemplateParserCleanupCB(void *P);
  LateTemplateParserCB *LateTemplateParser;
  LateTemplateParserCleanupCB *LateTemplateParserCleanup;
  void *OpaqueParser;

  void SetLateTemplateParser(LateTemplateParserCB *LTP,
                             LateTemplateParserCleanupCB *LTPCleanup,
                             void *P) {
    LateTemplateParser = LTP;
    LateTemplateParserCleanup = LTPCleanup;
    OpaqueParser = P;
  }

  /// \brief Callback to the parser to parse a type expressed as a string.
  std::function<TypeResult(StringRef, StringRef, SourceLocation)>
      ParseTypeFromStringCallback;

  class DelayedDiagnostics;

  class DelayedDiagnosticsState {
    sema::DelayedDiagnosticPool *SavedPool;
    friend class Sema::DelayedDiagnostics;
  };
  typedef DelayedDiagnosticsState ParsingDeclState;
  typedef DelayedDiagnosticsState ProcessingContextState;

  /// A class which encapsulates the logic for delaying diagnostics
  /// during parsing and other processing.
  class DelayedDiagnostics {
    /// The current pool of diagnostics into which delayed
    /// diagnostics should go.
    sema::DelayedDiagnosticPool *CurPool;

  public:
    DelayedDiagnostics() : CurPool(nullptr) {}

    /// Adds a delayed diagnostic.
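    // [Illustrative sketch -- not part of Clang.] The DelayedDiagnostics
    // class being defined here works by swapping a "current pool" pointer:
    // push() installs a caller-owned pool and returns the previous pointer so
    // it can be restored later. The same mechanism in a standalone form (the
    // Diagnostic and pool aliases below are hypothetical):
#if 0
#include <string>
#include <vector>

using Diagnostic = std::string;
using DiagnosticPool = std::vector<Diagnostic>;

class DelayedDiags {
  DiagnosticPool *CurPool = nullptr; // null => emit immediately

public:
  bool shouldDelay() const { return CurPool != nullptr; }

  // Install a new pool; the returned pointer is the saved state the caller
  // must hand back to pop() when leaving the scope.
  DiagnosticPool *push(DiagnosticPool &Pool) {
    DiagnosticPool *Saved = CurPool;
    CurPool = &Pool;
    return Saved;
  }

  void pop(DiagnosticPool *Saved) { CurPool = Saved; }

  void add(Diagnostic D) {
    if (CurPool)
      CurPool->push_back(std::move(D)); // delayed: caller decides later
    // else: a real implementation would emit the diagnostic right here
  }
};
#endif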
void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h /// Determines whether diagnostics should be delayed. bool shouldDelayDiagnostics() { return CurPool != nullptr; } /// Returns the current delayed-diagnostics pool. sema::DelayedDiagnosticPool *getCurrentPool() const { return CurPool; } /// Enter a new scope. Access and deprecation diagnostics will be /// collected in this pool. DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = &pool; return state; } /// Leave a delayed-diagnostic state that was previously pushed. /// Do not emit any of the diagnostics. This is performed as part /// of the bookkeeping of popping a pool "properly". void popWithoutEmitting(DelayedDiagnosticsState state) { CurPool = state.SavedPool; } /// Enter a new scope where access and deprecation diagnostics are /// not delayed. DelayedDiagnosticsState pushUndelayed() { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = nullptr; return state; } /// Undo a previous pushUndelayed(). void popUndelayed(DelayedDiagnosticsState state) { assert(CurPool == nullptr); CurPool = state.SavedPool; } } DelayedDiagnostics; /// A RAII object to temporarily push a declaration context. class ContextRAII { private: Sema &S; DeclContext *SavedContext; ProcessingContextState SavedContextState; QualType SavedCXXThisTypeOverride; public: ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true) : S(S), SavedContext(S.CurContext), SavedContextState(S.DelayedDiagnostics.pushUndelayed()), SavedCXXThisTypeOverride(S.CXXThisTypeOverride) { assert(ContextToPush && "pushing null context"); S.CurContext = ContextToPush; if (NewThisContext) S.CXXThisTypeOverride = QualType(); } void pop() { if (!SavedContext) return; S.CurContext = SavedContext; S.DelayedDiagnostics.popUndelayed(SavedContextState); S.CXXThisTypeOverride = SavedCXXThisTypeOverride; SavedContext = nullptr; } ~ContextRAII() { pop(); } }; /// Used to change context to isConstantEvaluated without pushing a heavy /// ExpressionEvaluationContextRecord object. bool isConstantEvaluatedOverride; bool isConstantEvaluated() { return ExprEvalContexts.back().isConstantEvaluated() || isConstantEvaluatedOverride; } /// RAII object to handle the state changes required to synthesize /// a function body. class SynthesizedFunctionScope { Sema &S; Sema::ContextRAII SavedContext; bool PushedCodeSynthesisContext = false; public: SynthesizedFunctionScope(Sema &S, DeclContext *DC) : S(S), SavedContext(S, DC) { S.PushFunctionScope(); S.PushExpressionEvaluationContext( Sema::ExpressionEvaluationContext::PotentiallyEvaluated); if (auto *FD = dyn_cast<FunctionDecl>(DC)) FD->setWillHaveBody(true); else assert(isa<ObjCMethodDecl>(DC)); } void addContextNote(SourceLocation UseLoc) { assert(!PushedCodeSynthesisContext); Sema::CodeSynthesisContext Ctx; Ctx.Kind = Sema::CodeSynthesisContext::DefiningSynthesizedFunction; Ctx.PointOfInstantiation = UseLoc; Ctx.Entity = cast<Decl>(S.CurContext); S.pushCodeSynthesisContext(Ctx); PushedCodeSynthesisContext = true; } ~SynthesizedFunctionScope() { if (PushedCodeSynthesisContext) S.popCodeSynthesisContext(); if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext)) FD->setWillHaveBody(false); S.PopExpressionEvaluationContext(); S.PopFunctionScopeInfo(); } }; /// WeakUndeclaredIdentifiers - Identifiers contained in /// \#pragma weak before declared. rare. 
may alias another /// identifier, declared or undeclared llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers; /// ExtnameUndeclaredIdentifiers - Identifiers contained in /// \#pragma redefine_extname before declared. Used in Solaris system headers /// to define functions that occur in multiple standards to call the version /// in the currently selected standard. llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers; /// Load weak undeclared identifiers from the external source. void LoadExternalWeakUndeclaredIdentifiers(); /// WeakTopLevelDecl - Translation-unit scoped declarations generated by /// \#pragma weak during processing of other Decls. /// I couldn't figure out a clean way to generate these in-line, so /// we store them here and handle separately -- which is a hack. /// It would be best to refactor this. SmallVector<Decl*,2> WeakTopLevelDecl; IdentifierResolver IdResolver; /// Translation Unit Scope - useful to Objective-C actions that need /// to lookup file scope declarations in the "ordinary" C decl namespace. /// For example, user-defined classes, built-in "id" type, etc. Scope *TUScope; /// The C++ "std" namespace, where the standard library resides. LazyDeclPtr StdNamespace; /// The C++ "std::bad_alloc" class, which is defined by the C++ /// standard library. LazyDeclPtr StdBadAlloc; /// The C++ "std::align_val_t" enum class, which is defined by the C++ /// standard library. LazyDeclPtr StdAlignValT; /// The C++ "std::experimental" namespace, where the experimental parts /// of the standard library resides. NamespaceDecl *StdExperimentalNamespaceCache; /// The C++ "std::initializer_list" template, which is defined in /// \<initializer_list>. ClassTemplateDecl *StdInitializerList; /// The C++ "std::coroutine_traits" template, which is defined in /// \<coroutine_traits> ClassTemplateDecl *StdCoroutineTraitsCache; /// The C++ "type_info" declaration, which is defined in \<typeinfo>. RecordDecl *CXXTypeInfoDecl; /// The MSVC "_GUID" struct, which is defined in MSVC header files. RecordDecl *MSVCGuidDecl; /// Caches identifiers/selectors for NSFoundation APIs. std::unique_ptr<NSAPI> NSAPIObj; /// The declaration of the Objective-C NSNumber class. ObjCInterfaceDecl *NSNumberDecl; /// The declaration of the Objective-C NSValue class. ObjCInterfaceDecl *NSValueDecl; /// Pointer to NSNumber type (NSNumber *). QualType NSNumberPointer; /// Pointer to NSValue type (NSValue *). QualType NSValuePointer; /// The Objective-C NSNumber methods used to create NSNumber literals. ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods]; /// The declaration of the Objective-C NSString class. ObjCInterfaceDecl *NSStringDecl; /// Pointer to NSString type (NSString *). QualType NSStringPointer; /// The declaration of the stringWithUTF8String: method. ObjCMethodDecl *StringWithUTF8StringMethod; /// The declaration of the valueWithBytes:objCType: method. ObjCMethodDecl *ValueWithBytesObjCTypeMethod; /// The declaration of the Objective-C NSArray class. ObjCInterfaceDecl *NSArrayDecl; /// The declaration of the arrayWithObjects:count: method. ObjCMethodDecl *ArrayWithObjectsMethod; /// The declaration of the Objective-C NSDictionary class. ObjCInterfaceDecl *NSDictionaryDecl; /// The declaration of the dictionaryWithObjects:forKeys:count: method. ObjCMethodDecl *DictionaryWithObjectsMethod; /// id<NSCopying> type. 
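  // [Illustrative sketch -- not part of Clang.] ContextRAII above (and
  // SynthesizedFunctionScope built on top of it) is plain save-on-construct /
  // restore-on-destruct RAII around Sema's current declaration context. The
  // pattern in a generic standalone form (the SaveAndRestore name here is a
  // hypothetical stand-in):
#if 0
#include <utility>

template <typename T>
class SaveAndRestore {
  T &Slot;
  T Saved;

public:
  SaveAndRestore(T &Slot, T NewValue) : Slot(Slot), Saved(Slot) {
    Slot = std::move(NewValue);
  }
  // Restore runs even on early return or exception, like ContextRAII::pop().
  ~SaveAndRestore() { Slot = std::move(Saved); }
};

// Usage: temporarily switch a "current context" pointer inside a scope.
// void example(Ctx *&Cur, Ctx *Inner) {
//   SaveAndRestore<Ctx *> Guard(Cur, Inner);
//   ... // Cur == Inner here
// }   // Cur restored here
#endif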
QualType QIDNSCopying; /// will hold 'respondsToSelector:' Selector RespondsToSelectorSel; /// A flag to remember whether the implicit forms of operator new and delete /// have been declared. bool GlobalNewDeleteDeclared; /// A flag to indicate that we're in a context that permits abstract /// references to fields. This is really a bool AllowAbstractFieldReference; /// Describes how the expressions currently being parsed are /// evaluated at run-time, if at all. enum class ExpressionEvaluationContext { /// The current expression and its subexpressions occur within an /// unevaluated operand (C++11 [expr]p7), such as the subexpression of /// \c sizeof, where the type of the expression may be significant but /// no code will be generated to evaluate the value of the expression at /// run time. Unevaluated, /// The current expression occurs within a braced-init-list within /// an unevaluated operand. This is mostly like a regular unevaluated /// context, except that we still instantiate constexpr functions that are /// referenced here so that we can perform narrowing checks correctly. UnevaluatedList, /// The current expression occurs within a discarded statement. /// This behaves largely similarly to an unevaluated operand in preventing /// definitions from being required, but not in other ways. DiscardedStatement, /// The current expression occurs within an unevaluated /// operand that unconditionally permits abstract references to /// fields, such as a SIZE operator in MS-style inline assembly. UnevaluatedAbstract, /// The current context is "potentially evaluated" in C++11 terms, /// but the expression is evaluated at compile-time (like the values of /// cases in a switch statement). ConstantEvaluated, /// The current expression is potentially evaluated at run time, /// which means that code may be generated to evaluate the value of the /// expression at run time. PotentiallyEvaluated, /// The current expression is potentially evaluated, but any /// declarations referenced inside that expression are only used if /// in fact the current expression is used. /// /// This value is used when parsing default function arguments, for which /// we would like to provide diagnostics (e.g., passing non-POD arguments /// through varargs) but do not want to mark declarations as "referenced" /// until the default argument is used. PotentiallyEvaluatedIfUsed }; /// Data structure used to record current or nested /// expression evaluation contexts. struct ExpressionEvaluationContextRecord { /// The expression evaluation context. ExpressionEvaluationContext Context; /// Whether the enclosing context needed a cleanup. CleanupInfo ParentCleanup; /// Whether we are in a decltype expression. bool IsDecltype; /// The number of active cleanup objects when we entered /// this expression evaluation context. unsigned NumCleanupObjects; /// The number of typos encountered during this expression evaluation /// context (i.e. the number of TypoExprs created). unsigned NumTypos; MaybeODRUseExprSet SavedMaybeODRUseExprs; /// The lambdas that are present within this context, if it /// is indeed an unevaluated context. SmallVector<LambdaExpr *, 2> Lambdas; /// The declaration that provides context for lambda expressions /// and block literals if the normal declaration context does not /// suffice, e.g., in a default function argument. Decl *ManglingContextDecl; /// If we are processing a decltype type, a set of call expressions /// for which we have deferred checking the completeness of the return type. 
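  // [Illustrative sketch -- not part of Clang.] The ExpressionEvaluationContext
  // values above are managed as a stack of records (ExprEvalContexts, just
  // below): entering e.g. a sizeof operand pushes Unevaluated, and queries
  // consult only the innermost record. A standalone skeleton of that
  // discipline (names hypothetical):
#if 0
#include <cassert>
#include <vector>

enum class EvalContext { Unevaluated, ConstantEvaluated, PotentiallyEvaluated };

class EvalContextStack {
  std::vector<EvalContext> Stack{EvalContext::PotentiallyEvaluated};

public:
  void push(EvalContext C) { Stack.push_back(C); }
  void pop() {
    assert(Stack.size() > 1 && "popping the outermost context");
    Stack.pop_back();
  }
  // Only the innermost context matters for "will this code be emitted?".
  bool isUnevaluated() const { return Stack.back() == EvalContext::Unevaluated; }
};

// E.g. while parsing `sizeof(expr)`: push(Unevaluated); parse expr; pop();
#endif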
    SmallVector<CallExpr *, 8> DelayedDecltypeCalls;

    /// If we are processing a decltype type, a set of temporary binding
    /// expressions for which we have deferred checking the destructor.
    SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds;

    llvm::SmallPtrSet<const Expr *, 8> PossibleDerefs;

    /// Expressions appearing as the LHS of a volatile assignment in this
    /// context. We produce a warning for these when popping the context if
    /// they are not discarded-value expressions nor unevaluated operands.
    SmallVector<Expr*, 2> VolatileAssignmentLHSs;

    /// \brief Describes whether we are in an expression context which we have
    /// to handle differently.
    enum ExpressionKind {
      EK_Decltype, EK_TemplateArgument, EK_Other
    } ExprContext;

    ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context,
                                      unsigned NumCleanupObjects,
                                      CleanupInfo ParentCleanup,
                                      Decl *ManglingContextDecl,
                                      ExpressionKind ExprContext)
        : Context(Context), ParentCleanup(ParentCleanup),
          NumCleanupObjects(NumCleanupObjects), NumTypos(0),
          ManglingContextDecl(ManglingContextDecl), ExprContext(ExprContext) {}

    bool isUnevaluated() const {
      return Context == ExpressionEvaluationContext::Unevaluated ||
             Context == ExpressionEvaluationContext::UnevaluatedAbstract ||
             Context == ExpressionEvaluationContext::UnevaluatedList;
    }
    bool isConstantEvaluated() const {
      return Context == ExpressionEvaluationContext::ConstantEvaluated;
    }
  };

  /// A stack of expression evaluation contexts.
  SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts;

  /// Emit a warning for all pending noderef expressions that we recorded.
  void WarnOnPendingNoDerefs(ExpressionEvaluationContextRecord &Rec);

  /// Compute the mangling number context for a lambda expression or
  /// block literal. Also return the extra mangling decl if any.
  ///
  /// \param DC - The DeclContext containing the lambda expression or
  /// block literal.
  std::tuple<MangleNumberingContext *, Decl *>
  getCurrentMangleNumberContext(const DeclContext *DC);

  /// SpecialMemberOverloadResult - The overloading result for a special member
  /// function.
  ///
  /// This is basically a wrapper around PointerIntPair. The lowest bits of the
  /// integer are used to determine whether overload resolution succeeded.
  class SpecialMemberOverloadResult {
  public:
    enum Kind {
      NoMemberOrDeleted,
      Ambiguous,
      Success
    };

  private:
    llvm::PointerIntPair<CXXMethodDecl*, 2> Pair;

  public:
    SpecialMemberOverloadResult() : Pair() {}
    SpecialMemberOverloadResult(CXXMethodDecl *MD)
        : Pair(MD, MD->isDeleted() ? NoMemberOrDeleted : Success) {}

    CXXMethodDecl *getMethod() const { return Pair.getPointer(); }
    void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); }

    Kind getKind() const { return static_cast<Kind>(Pair.getInt()); }
    void setKind(Kind K) { Pair.setInt(K); }
  };

  class SpecialMemberOverloadResultEntry
      : public llvm::FastFoldingSetNode,
        public SpecialMemberOverloadResult {
  public:
    SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID)
        : FastFoldingSetNode(ID) {}
  };

  /// A cache of special member function overload resolution results
  /// for C++ records.
  llvm::FoldingSet<SpecialMemberOverloadResultEntry> SpecialMemberCache;

  /// A cache of the flags available in enumerations with the flag_bits
  /// attribute.
  mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache;

  /// The kind of translation unit we are processing.
/// /// When we're processing a complete translation unit, Sema will perform /// end-of-translation-unit semantic tasks (such as creating /// initializers for tentative definitions in C) once parsing has /// completed. Modules and precompiled headers perform different kinds of /// checks. TranslationUnitKind TUKind; llvm::BumpPtrAllocator BumpAlloc; /// The number of SFINAE diagnostics that have been trapped. unsigned NumSFINAEErrors; typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>> UnparsedDefaultArgInstantiationsMap; /// A mapping from parameters with unparsed default arguments to the /// set of instantiations of each parameter. /// /// This mapping is a temporary data structure used when parsing /// nested class templates or nested classes of class templates, /// where we might end up instantiating an inner class before the /// default arguments of its methods have been parsed. UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations; // Contains the locations of the beginning of unparsed default // argument locations. llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs; /// UndefinedInternals - all the used, undefined objects which require a /// definition in this translation unit. llvm::MapVector<NamedDecl *, SourceLocation> UndefinedButUsed; /// Determine if VD, which must be a variable or function, is an external /// symbol that nonetheless can't be referenced from outside this translation /// unit because its type has no linkage and it's not extern "C". bool isExternalWithNoLinkageType(ValueDecl *VD); /// Obtain a sorted list of functions that are undefined but ODR-used. void getUndefinedButUsed( SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined); /// Retrieves list of suspicious delete-expressions that will be checked at /// the end of translation unit. const llvm::MapVector<FieldDecl *, DeleteLocs> & getMismatchingDeleteExpressions() const; typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods; typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool; /// Method Pool - allows efficient lookup when typechecking messages to "id". /// We need to maintain a list, since selectors can have differing signatures /// across classes. In Cocoa, this happens to be extremely uncommon (only 1% /// of selectors are "overloaded"). /// At the head of the list it is recorded whether there were 0, 1, or >= 2 /// methods inside categories with a particular selector. GlobalMethodPool MethodPool; /// Method selectors used in a \@selector expression. Used for implementation /// of -Wselector. llvm::MapVector<Selector, SourceLocation> ReferencedSelectors; /// List of SourceLocations where 'self' is implicitly retained inside a /// block. llvm::SmallVector<std::pair<SourceLocation, const BlockDecl *>, 1> ImplicitlyRetainedSelfLocs; /// Kinds of C++ special members. enum CXXSpecialMember { CXXDefaultConstructor, CXXCopyConstructor, CXXMoveConstructor, CXXCopyAssignment, CXXMoveAssignment, CXXDestructor, CXXInvalid }; typedef llvm::PointerIntPair<CXXRecordDecl *, 3, CXXSpecialMember> SpecialMemberDecl; /// The C++ special members which we are currently in the process of /// declaring. If this process recursively triggers the declaration of the /// same special member, we should act as if it is not yet declared. llvm::SmallPtrSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared; /// Kinds of defaulted comparison operator functions. 
  enum class DefaultedComparisonKind : unsigned char {
    /// This is not a defaultable comparison operator.
    None,
    /// This is an operator== that should be implemented as a series of
    /// subobject comparisons.
    Equal,
    /// This is an operator<=> that should be implemented as a series of
    /// subobject comparisons.
    ThreeWay,
    /// This is an operator!= that should be implemented as a rewrite in terms
    /// of a == comparison.
    NotEqual,
    /// This is an <, <=, >, or >= that should be implemented as a rewrite in
    /// terms of a <=> comparison.
    Relational,
  };

  /// The function definitions which were renamed as part of typo-correction
  /// to match their respective declarations. We want to keep track of them
  /// to ensure that we don't emit a "redefinition" error if we encounter a
  /// correctly named definition after the renamed definition.
  llvm::SmallPtrSet<const NamedDecl *, 4> TypoCorrectedFunctionDefinitions;

  /// Stack of types that correspond to the parameter entities that are
  /// currently being copy-initialized. Can be empty.
  llvm::SmallVector<QualType, 4> CurrentParameterCopyTypes;

  void ReadMethodPool(Selector Sel);
  void updateOutOfDateSelector(Selector Sel);

  /// Private helper predicate to check for 'self'.
  bool isSelfExpr(Expr *RExpr);
  bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method);

  /// Cause the active diagnostic on the DiagnosticsEngine to be
  /// emitted. This is closely coupled to the SemaDiagnosticBuilder class and
  /// should not be used elsewhere.
  void EmitCurrentDiagnostic(unsigned DiagID);

  /// Records and restores the FP_CONTRACT state on entry/exit of compound
  /// statements.
  class FPContractStateRAII {
  public:
    FPContractStateRAII(Sema &S) : S(S), OldFPFeaturesState(S.FPFeatures) {}
    ~FPContractStateRAII() { S.FPFeatures = OldFPFeaturesState; }

  private:
    Sema& S;
    FPOptions OldFPFeaturesState;
  };

  void addImplicitTypedef(StringRef Name, QualType T);

  bool WarnedStackExhausted = false;

public:
  Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
       TranslationUnitKind TUKind = TU_Complete,
       CodeCompleteConsumer *CompletionConsumer = nullptr);
  ~Sema();

  /// Perform initialization that occurs after the parser has been
  /// initialized but before it parses anything.
  void Initialize();

  const LangOptions &getLangOpts() const { return LangOpts; }
  OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; }
  FPOptions &getFPOptions() { return FPFeatures; }

  DiagnosticsEngine &getDiagnostics() const { return Diags; }
  SourceManager &getSourceManager() const { return SourceMgr; }
  Preprocessor &getPreprocessor() const { return PP; }
  ASTContext &getASTContext() const { return Context; }
  ASTConsumer &getASTConsumer() const { return Consumer; }
  ASTMutationListener *getASTMutationListener() const;
  ExternalSemaSource *getExternalSource() const { return ExternalSource; }

  /// Registers an external source. If an external source already exists,
  /// creates a multiplex external source and appends to it.
  ///
  /// \param[in] E - A non-null external sema source.
  ///
  void addExternalSource(ExternalSemaSource *E);

  void PrintStats() const;

  /// Warn that the stack is nearly exhausted.
  void warnStackExhausted(SourceLocation Loc);
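  // [Illustrative sketch -- not part of Clang.] DefaultedComparisonKind above
  // distinguishes operators that are *rewritten*: `!=` in terms of `==`, and
  // the relational operators in terms of `<=>`. In C++20 source, the rewrite
  // the enum describes looks like this:
#if 0
#include <compare>

struct Point {
  int x, y;
  auto operator<=>(const Point &) const = default; // ThreeWay
  bool operator==(const Point &) const = default;  // Equal
};

static_assert(Point{1, 2} < Point{1, 3});  // rewritten as (a <=> b) < 0
static_assert(Point{1, 2} != Point{2, 2}); // rewritten as !(a == b)
#endif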
  /// Run some code with "sufficient" stack space. (Currently, at least 256K
  /// is guaranteed). Produces a warning if we're low on stack space and
  /// allocates more in that case. Use this in code that may recurse deeply
  /// (for example, in template instantiation) to avoid stack overflow.
  void runWithSufficientStackSpace(SourceLocation Loc,
                                   llvm::function_ref<void()> Fn);

  /// Helper class that creates diagnostics with optional
  /// template instantiation stacks.
  ///
  /// This class provides a wrapper around the basic DiagnosticBuilder
  /// class that emits diagnostics. SemaDiagnosticBuilder is
  /// responsible for emitting the diagnostic (as DiagnosticBuilder
  /// does) and, if the diagnostic comes from inside a template
  /// instantiation, printing the template instantiation stack as
  /// well.
  class SemaDiagnosticBuilder : public DiagnosticBuilder {
    Sema &SemaRef;
    unsigned DiagID;

  public:
    SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef,
                          unsigned DiagID)
        : DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) {}

    // This is a cunning lie. DiagnosticBuilder actually performs move
    // construction in its copy constructor (but due to varied uses, it's not
    // possible to conveniently express this as actual move construction). So
    // the default copy ctor here is fine, because the base class disables the
    // source anyway, so the user-defined ~SemaDiagnosticBuilder is a safe
    // no-op in that case anyway.
    SemaDiagnosticBuilder(const SemaDiagnosticBuilder &) = default;

    ~SemaDiagnosticBuilder() {
      // If we aren't active, there is nothing to do.
      if (!isActive()) return;

      // Otherwise, we need to emit the diagnostic. First flush the underlying
      // DiagnosticBuilder data, and clear the diagnostic builder itself so it
      // won't emit the diagnostic in its own destructor.
      //
      // This seems wasteful, in that as written the DiagnosticBuilder dtor
      // will do its own needless checks to see if the diagnostic needs to be
      // emitted. However, because we take care to ensure that the builder
      // objects never escape, a sufficiently smart compiler will be able to
      // eliminate that code.
      FlushCounts();
      Clear();

      // Dispatch to Sema to emit the diagnostic.
      SemaRef.EmitCurrentDiagnostic(DiagID);
    }

    /// Teach operator<< to produce an object of the correct type.
    template <typename T>
    friend const SemaDiagnosticBuilder &
    operator<<(const SemaDiagnosticBuilder &Diag, const T &Value) {
      const DiagnosticBuilder &BaseDiag = Diag;
      BaseDiag << Value;
      return Diag;
    }
  };

  /// Emit a diagnostic.
  SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) {
    DiagnosticBuilder DB = Diags.Report(Loc, DiagID);
    return SemaDiagnosticBuilder(DB, *this, DiagID);
  }

  /// Emit a partial diagnostic.
  SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic &PD);

  /// Build a partial diagnostic.
  PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h

  bool findMacroSpelling(SourceLocation &loc, StringRef name);

  /// Get a string to suggest for zero-initialization of a type.
  std::string getFixItZeroInitializerForType(QualType T,
                                             SourceLocation Loc) const;
  std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const;

  /// Calls \c Lexer::getLocForEndOfToken()
  SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0);

  /// Retrieve the module loader associated with the preprocessor.
  ModuleLoader &getModuleLoader() const;

  void emitAndClearUnusedLocalTypedefWarnings();

  enum TUFragmentKind {
    /// The global module fragment, between 'module;' and a module-declaration.
    Global,
    /// A normal translation unit fragment. For a non-module unit, this is the
    /// entire translation unit. Otherwise, it runs from the module-declaration
    /// to the private-module-fragment (if any) or the end of the TU (if not).
Normal, /// The private module fragment, between 'module :private;' and the end of /// the translation unit. Private }; void ActOnStartOfTranslationUnit(); void ActOnEndOfTranslationUnit(); void ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind); void CheckDelegatingCtorCycles(); Scope *getScopeForContext(DeclContext *Ctx); void PushFunctionScope(); void PushBlockScope(Scope *BlockScope, BlockDecl *Block); sema::LambdaScopeInfo *PushLambdaScope(); /// This is used to inform Sema what the current TemplateParameterDepth /// is during Parsing. Currently it is used to pass on the depth /// when parsing generic lambda 'auto' parameters. void RecordParsingTemplateParameterDepth(unsigned Depth); void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD, RecordDecl *RD, CapturedRegionKind K, unsigned OpenMPCaptureLevel = 0); /// Custom deleter to allow FunctionScopeInfos to be kept alive for a short /// time after they've been popped. class PoppedFunctionScopeDeleter { Sema *Self; public: explicit PoppedFunctionScopeDeleter(Sema *Self) : Self(Self) {} void operator()(sema::FunctionScopeInfo *Scope) const; }; using PoppedFunctionScopePtr = std::unique_ptr<sema::FunctionScopeInfo, PoppedFunctionScopeDeleter>; PoppedFunctionScopePtr PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr, const Decl *D = nullptr, QualType BlockType = QualType()); sema::FunctionScopeInfo *getCurFunction() const { return FunctionScopes.empty() ? nullptr : FunctionScopes.back(); } sema::FunctionScopeInfo *getEnclosingFunction() const; void setFunctionHasBranchIntoScope(); void setFunctionHasBranchProtectedScope(); void setFunctionHasIndirectGoto(); void PushCompoundScope(bool IsStmtExpr); void PopCompoundScope(); sema::CompoundScopeInfo &getCurCompoundScope() const; bool hasAnyUnrecoverableErrorsInThisFunction() const; /// Retrieve the current block, if any. sema::BlockScopeInfo *getCurBlock(); /// Get the innermost lambda enclosing the current location, if any. This /// looks through intervening non-lambda scopes such as local functions and /// blocks. sema::LambdaScopeInfo *getEnclosingLambda() const; /// Retrieve the current lambda scope info, if any. /// \param IgnoreNonLambdaCapturingScope true if should find the top-most /// lambda scope info ignoring all inner capturing scopes that are not /// lambda scopes. sema::LambdaScopeInfo * getCurLambda(bool IgnoreNonLambdaCapturingScope = false); /// Retrieve the current generic lambda info, if any. sema::LambdaScopeInfo *getCurGenericLambda(); /// Retrieve the current captured region, if any. sema::CapturedRegionScopeInfo *getCurCapturedRegion(); /// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; } void ActOnComment(SourceRange Comment); //===--------------------------------------------------------------------===// // Type Analysis / Processing: SemaType.cpp. 
// QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs, const DeclSpec *DS = nullptr); QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA, const DeclSpec *DS = nullptr); QualType BuildPointerType(QualType T, SourceLocation Loc, DeclarationName Entity); QualType BuildReferenceType(QualType T, bool LValueRef, SourceLocation Loc, DeclarationName Entity); QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM, Expr *ArraySize, unsigned Quals, SourceRange Brackets, DeclarationName Entity); QualType BuildVectorType(QualType T, Expr *VecSize, SourceLocation AttrLoc); QualType BuildExtVectorType(QualType T, Expr *ArraySize, SourceLocation AttrLoc); QualType BuildAddressSpaceAttr(QualType &T, LangAS ASIdx, Expr *AddrSpace, SourceLocation AttrLoc); /// Same as above, but constructs the AddressSpace index if not provided. QualType BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace, SourceLocation AttrLoc); bool CheckQualifiedFunctionForTypeId(QualType T, SourceLocation Loc); bool CheckFunctionReturnType(QualType T, SourceLocation Loc); /// Build a function type. /// /// This routine checks the function type according to C++ rules and /// under the assumption that the result type and parameter types have /// just been instantiated from a template. It therefore duplicates /// some of the behavior of GetTypeForDeclarator, but in a much /// simpler form that is only suitable for this narrow use case. /// /// \param T The return type of the function. /// /// \param ParamTypes The parameter types of the function. This array /// will be modified to account for adjustments to the types of the /// function parameters. /// /// \param Loc The location of the entity whose type involves this /// function type or, if there is no such entity, the location of the /// type that will have function type. /// /// \param Entity The name of the entity that involves the function /// type, if known. /// /// \param EPI Extra information about the function type. Usually this will /// be taken from an existing function with the same prototype. /// /// \returns A suitable function type, if there are no errors. The /// unqualified type will always be a FunctionProtoType. /// Otherwise, returns a NULL type. QualType BuildFunctionType(QualType T, MutableArrayRef<QualType> ParamTypes, SourceLocation Loc, DeclarationName Entity, const FunctionProtoType::ExtProtoInfo &EPI); QualType BuildMemberPointerType(QualType T, QualType Class, SourceLocation Loc, DeclarationName Entity); QualType BuildBlockPointerType(QualType T, SourceLocation Loc, DeclarationName Entity); QualType BuildParenType(QualType T); QualType BuildAtomicType(QualType T, SourceLocation Loc); QualType BuildReadPipeType(QualType T, SourceLocation Loc); QualType BuildWritePipeType(QualType T, SourceLocation Loc); TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S); TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy); /// Package the given type and TSI into a ParsedType. 
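  // [Illustrative sketch -- not part of Clang.] BoundTypeDiagnoser (declared a
  // little further down) captures diagnostic arguments in a std::tuple and
  // streams them into the builder via a std::index_sequence expansion. The
  // trick in isolation, with std::ostream standing in for the diagnostic
  // builder and the BoundArgs name purely hypothetical:
#if 0
#include <iostream>
#include <tuple>
#include <utility>

template <typename... Ts>
class BoundArgs {
  std::tuple<Ts...> Args;

  template <std::size_t... Is>
  void emit(std::ostream &OS, std::index_sequence<Is...>) const {
    // Fold over the index pack: stream each tuple element in order.
    ((OS << std::get<Is>(Args) << ' '), ...);
  }

public:
  explicit BoundArgs(Ts... As) : Args(std::move(As)...) {}

  void emit(std::ostream &OS) const {
    emit(OS, std::index_sequence_for<Ts...>());
  }
};

// Usage: BoundArgs<int, const char *>(42, "widgets").emit(std::cout);
#endif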
ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo); DeclarationNameInfo GetNameForDeclarator(Declarator &D); DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name); static QualType GetTypeFromParser(ParsedType Ty, TypeSourceInfo **TInfo = nullptr); CanThrowResult canThrow(const Stmt *E); const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc, const FunctionProtoType *FPT); void UpdateExceptionSpec(FunctionDecl *FD, const FunctionProtoType::ExceptionSpecInfo &ESI); bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range); bool CheckDistantExceptionSpec(QualType T); bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New); bool CheckEquivalentExceptionSpec( const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc); bool CheckEquivalentExceptionSpec( const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID, const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc); bool handlerCanCatch(QualType HandlerType, QualType ExceptionType); bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID, const PartialDiagnostic &NestedDiagID, const PartialDiagnostic &NoteID, const PartialDiagnostic &NoThrowDiagID, const FunctionProtoType *Superset, SourceLocation SuperLoc, const FunctionProtoType *Subset, SourceLocation SubLoc); bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID, const PartialDiagnostic &NoteID, const FunctionProtoType *Target, SourceLocation TargetLoc, const FunctionProtoType *Source, SourceLocation SourceLoc); TypeResult ActOnTypeName(Scope *S, Declarator &D); /// The parser has parsed the context-sensitive type 'instancetype' /// in an Objective-C message declaration. Return the appropriate type. ParsedType ActOnObjCInstanceType(SourceLocation Loc); /// Abstract class used to diagnose incomplete types. struct TypeDiagnoser { TypeDiagnoser() {} virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0; virtual ~TypeDiagnoser() {} }; static int getPrintable(int I) { return I; } static unsigned getPrintable(unsigned I) { return I; } static bool getPrintable(bool B) { return B; } static const char * getPrintable(const char *S) { return S; } static StringRef getPrintable(StringRef S) { return S; } static const std::string &getPrintable(const std::string &S) { return S; } static const IdentifierInfo *getPrintable(const IdentifierInfo *II) { return II; } static DeclarationName getPrintable(DeclarationName N) { return N; } static QualType getPrintable(QualType T) { return T; } static SourceRange getPrintable(SourceRange R) { return R; } static SourceRange getPrintable(SourceLocation L) { return L; } static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); } static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();} template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser { unsigned DiagID; std::tuple<const Ts &...> Args; template <std::size_t... Is> void emit(const SemaDiagnosticBuilder &DB, std::index_sequence<Is...>) const { // Apply all tuple elements to the builder in order. bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...}; (void)Dummy; } public: BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args) : TypeDiagnoser(), DiagID(DiagID), Args(Args...) 
{ assert(DiagID != 0 && "no diagnostic for type diagnoser"); } void diagnose(Sema &S, SourceLocation Loc, QualType T) override { const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID); emit(DB, std::index_sequence_for<Ts...>()); DB << T; } }; /// Do a check to make sure \p Name looks like a legal swift_name /// attribute for the decl \p D. Raise a diagnostic if the name is invalid /// for the given declaration. /// /// For a function, this will validate a compound Swift name, /// e.g. <code>init(foo:bar:baz:)</code> or <code>controllerForName(_:)</code>, /// and the function will output the number of parameter names, and whether /// this is a single-arg initializer. /// /// For a type, enum constant, property, or variable declaration, this will /// validate either a simple identifier, or a qualified /// <code>context.identifier</code> name. /// /// \returns true if the name is a valid swift name for \p D, false otherwise. bool DiagnoseSwiftName(Decl *D, StringRef Name, SourceLocation ArgLoc, const IdentifierInfo *AttrName); private: /// Methods for marking which expressions involve dereferencing a pointer /// marked with the 'noderef' attribute. Expressions are checked bottom up as /// they are parsed, meaning that a noderef pointer may not be accessed. For /// example, in `&*p` where `p` is a noderef pointer, we will first parse the /// `*p`, but need to check that `address of` is called on it. This requires /// keeping a container of all pending expressions and checking if the address /// of them are eventually taken. void CheckSubscriptAccessOfNoDeref(const ArraySubscriptExpr *E); void CheckAddressOfNoDeref(const Expr *E); void CheckMemberAccessOfNoDeref(const MemberExpr *E); bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T, TypeDiagnoser *Diagnoser); struct ModuleScope { SourceLocation BeginLoc; clang::Module *Module = nullptr; bool ModuleInterface = false; bool ImplicitGlobalModuleFragment = false; VisibleModuleSet OuterVisibleModules; }; /// The modules we're currently parsing. llvm::SmallVector<ModuleScope, 16> ModuleScopes; /// Namespace definitions that we will export when they finish. llvm::SmallPtrSet<const NamespaceDecl*, 8> DeferredExportedNamespaces; /// Get the module whose scope we are currently within. Module *getCurrentModule() const { return ModuleScopes.empty() ? nullptr : ModuleScopes.back().Module; } VisibleModuleSet VisibleModules; public: /// Get the module owning an entity. Module *getOwningModule(Decl *Entity) { return Entity->getOwningModule(); } /// Make a merged definition of an existing hidden definition \p ND /// visible at the specified location. void makeMergedDefinitionVisible(NamedDecl *ND); bool isModuleVisible(const Module *M, bool ModulePrivate = false); /// Determine whether a declaration is visible to name lookup. bool isVisible(const NamedDecl *D) { return !D->isHidden() || isVisibleSlow(D); } /// Determine whether any declaration of an entity is visible. bool hasVisibleDeclaration(const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr) { return isVisible(D) || hasVisibleDeclarationSlow(D, Modules); } bool hasVisibleDeclarationSlow(const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules); bool hasVisibleMergedDefinition(NamedDecl *Def); bool hasMergedDefinitionInCurrentModule(NamedDecl *Def); /// Determine if \p D and \p Suggested have a structurally compatible /// layout as described in C11 6.2.7/1. bool hasStructuralCompatLayout(Decl *D, Decl *Suggested); /// Determine if \p D has a visible definition. 
If not, suggest a declaration /// that should be made visible to expose the definition. bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested, bool OnlyNeedComplete = false); bool hasVisibleDefinition(const NamedDecl *D) { NamedDecl *Hidden; return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden); } /// Determine if the template parameter \p D has a visible default argument. bool hasVisibleDefaultArgument(const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if there is a visible declaration of \p D that is an explicit /// specialization declaration for a specialization of a template. (For a /// member specialization, use hasVisibleMemberSpecialization.) bool hasVisibleExplicitSpecialization( const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if there is a visible declaration of \p D that is a member /// specialization declaration (as opposed to an instantiated declaration). bool hasVisibleMemberSpecialization( const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if \p A and \p B are equivalent internal linkage declarations /// from different modules, and thus an ambiguity error can be downgraded to /// an extension warning. bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A, const NamedDecl *B); void diagnoseEquivalentInternalLinkageDeclarations( SourceLocation Loc, const NamedDecl *D, ArrayRef<const NamedDecl *> Equiv); bool isUsualDeallocationFunction(const CXXMethodDecl *FD); bool isCompleteType(SourceLocation Loc, QualType T) { return !RequireCompleteTypeImpl(Loc, T, nullptr); } bool RequireCompleteType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID); template <typename... Ts> bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteType(Loc, T, Diagnoser); } void completeExprArrayBound(Expr *E); bool RequireCompleteExprType(Expr *E, TypeDiagnoser &Diagnoser); bool RequireCompleteExprType(Expr *E, unsigned DiagID); template <typename... Ts> bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteExprType(E, Diagnoser); } bool RequireLiteralType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID); template <typename... Ts> bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireLiteralType(Loc, T, Diagnoser); } QualType getElaboratedType(ElaboratedTypeKeyword Keyword, const CXXScopeSpec &SS, QualType T, TagDecl *OwnedTagDecl = nullptr); QualType BuildTypeofExprType(Expr *E, SourceLocation Loc); /// If AsUnevaluated is false, E is treated as though it were an evaluated /// context, such as when building a type for decltype(auto). QualType BuildDecltypeType(Expr *E, SourceLocation Loc, bool AsUnevaluated = true); QualType BuildUnaryTransformType(QualType BaseType, UnaryTransformType::UTTKind UKind, SourceLocation Loc); //===--------------------------------------------------------------------===// // Symbol table / Decl tracking callbacks: SemaDecl.cpp. 
// struct SkipBodyInfo { SkipBodyInfo() : ShouldSkip(false), CheckSameAsPrevious(false), Previous(nullptr), New(nullptr) {} bool ShouldSkip; bool CheckSameAsPrevious; NamedDecl *Previous; NamedDecl *New; }; DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr); void DiagnoseUseOfUnimplementedSelectors(); bool isSimpleTypeSpecifier(tok::TokenKind Kind) const; ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec *SS = nullptr, bool isClassName = false, bool HasTrailingDot = false, ParsedType ObjectType = nullptr, bool IsCtorOrDtorName = false, bool WantNontrivialTypeSourceInfo = false, bool IsClassTemplateDeductionContext = true, IdentifierInfo **CorrectedII = nullptr); TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S); bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S); void DiagnoseUnknownTypeName(IdentifierInfo *&II, SourceLocation IILoc, Scope *S, CXXScopeSpec *SS, ParsedType &SuggestedType, bool IsTemplateName = false); /// Attempt to behave like MSVC in situations where lookup of an unqualified /// type name has failed in a dependent context. In these situations, we /// automatically form a DependentTypeName that will retry lookup in a related /// scope during instantiation. ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II, SourceLocation NameLoc, bool IsTemplateTypeArg); /// Describes the result of the name lookup and resolution performed /// by \c ClassifyName(). enum NameClassificationKind { /// This name is not a type or template in this context, but might be /// something else. NC_Unknown, /// Classification failed; an error has been produced. NC_Error, /// The name has been typo-corrected to a keyword. NC_Keyword, /// The name was classified as a type. NC_Type, /// The name was classified as a specific non-type, non-template /// declaration. ActOnNameClassifiedAsNonType should be called to /// convert the declaration to an expression. NC_NonType, /// The name was classified as an ADL-only function name. /// ActOnNameClassifiedAsUndeclaredNonType should be called to convert the /// result to an expression. NC_UndeclaredNonType, /// The name denotes a member of a dependent type that could not be /// resolved. ActOnNameClassifiedAsDependentNonType should be called to /// convert the result to an expression. NC_DependentNonType, /// The name was classified as a non-type, and an expression representing /// that name has been formed. NC_ContextIndependentExpr, /// The name was classified as a template whose specializations are types. NC_TypeTemplate, /// The name was classified as a variable template name. NC_VarTemplate, /// The name was classified as a function template name. NC_FunctionTemplate, /// The name was classified as an ADL-only function template name. 
NC_UndeclaredTemplate, }; class NameClassification { NameClassificationKind Kind; union { ExprResult Expr; NamedDecl *NonTypeDecl; TemplateName Template; ParsedType Type; }; explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {} public: NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {} NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword) {} static NameClassification Error() { return NameClassification(NC_Error); } static NameClassification Unknown() { return NameClassification(NC_Unknown); } static NameClassification ContextIndependentExpr(ExprResult E) { NameClassification Result(NC_ContextIndependentExpr); Result.Expr = E; return Result; } static NameClassification NonType(NamedDecl *D) { NameClassification Result(NC_NonType); Result.NonTypeDecl = D; return Result; } static NameClassification UndeclaredNonType() { return NameClassification(NC_UndeclaredNonType); } static NameClassification DependentNonType() { return NameClassification(NC_DependentNonType); } static NameClassification TypeTemplate(TemplateName Name) { NameClassification Result(NC_TypeTemplate); Result.Template = Name; return Result; } static NameClassification VarTemplate(TemplateName Name) { NameClassification Result(NC_VarTemplate); Result.Template = Name; return Result; } static NameClassification FunctionTemplate(TemplateName Name) { NameClassification Result(NC_FunctionTemplate); Result.Template = Name; return Result; } static NameClassification UndeclaredTemplate(TemplateName Name) { NameClassification Result(NC_UndeclaredTemplate); Result.Template = Name; return Result; } NameClassificationKind getKind() const { return Kind; } ExprResult getExpression() const { assert(Kind == NC_ContextIndependentExpr); return Expr; } ParsedType getType() const { assert(Kind == NC_Type); return Type; } NamedDecl *getNonTypeDecl() const { assert(Kind == NC_NonType); return NonTypeDecl; } TemplateName getTemplateName() const { assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate || Kind == NC_VarTemplate || Kind == NC_UndeclaredTemplate); return Template; } TemplateNameKind getTemplateNameKind() const { switch (Kind) { case NC_TypeTemplate: return TNK_Type_template; case NC_FunctionTemplate: return TNK_Function_template; case NC_VarTemplate: return TNK_Var_template; case NC_UndeclaredTemplate: return TNK_Undeclared_template; default: llvm_unreachable("unsupported name classification."); } } }; /// Perform name lookup on the given name, classifying it based on /// the results of name lookup and the following token. /// /// This routine is used by the parser to resolve identifiers and help direct /// parsing. When the identifier cannot be found, this routine will attempt /// to correct the typo and classify based on the resulting name. /// /// \param S The scope in which we're performing name lookup. /// /// \param SS The nested-name-specifier that precedes the name. /// /// \param Name The identifier. If typo correction finds an alternative name, /// this pointer parameter will be updated accordingly. /// /// \param NameLoc The location of the identifier. /// /// \param NextToken The token following the identifier. Used to help /// disambiguate the name. /// /// \param CCC The correction callback, if typo correction is desired. 
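///
/// \par Example (an illustrative sketch, not part of the original header;
/// assumes a parser-side call site where the Sema reference \c Actions and
/// \c S, \c SS, \c II, \c Loc and \c Next are already in scope):
/// \code
///   Sema::NameClassification NC =
///       Actions.ClassifyName(S, SS, II, Loc, Next, /*CCC=*/nullptr);
///   switch (NC.getKind()) {
///   case Sema::NC_Type:         /* 'II' names a type; parse a type */   break;
///   case Sema::NC_TypeTemplate: /* template name; expect a '<' */       break;
///   case Sema::NC_Error:        /* a diagnostic was already emitted */  break;
///   default: break;
///   }
/// \endcode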
NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS, IdentifierInfo *&Name, SourceLocation NameLoc, const Token &NextToken, CorrectionCandidateCallback *CCC = nullptr); /// Act on the result of classifying a name as an undeclared (ADL-only) /// non-type declaration. ExprResult ActOnNameClassifiedAsUndeclaredNonType(IdentifierInfo *Name, SourceLocation NameLoc); /// Act on the result of classifying a name as an undeclared member of a /// dependent base class. ExprResult ActOnNameClassifiedAsDependentNonType(const CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, bool IsAddressOfOperand); /// Act on the result of classifying a name as a specific non-type /// declaration. ExprResult ActOnNameClassifiedAsNonType(Scope *S, const CXXScopeSpec &SS, NamedDecl *Found, SourceLocation NameLoc, const Token &NextToken); /// Describes the detailed kind of a template name. Used in diagnostics. enum class TemplateNameKindForDiagnostics { ClassTemplate, FunctionTemplate, VarTemplate, AliasTemplate, TemplateTemplateParam, Concept, DependentTemplate }; TemplateNameKindForDiagnostics getTemplateNameKindForDiagnostics(TemplateName Name); /// Determine whether it's plausible that E was intended to be a /// template-name. bool mightBeIntendedToBeTemplateName(ExprResult E, bool &Dependent) { if (!getLangOpts().CPlusPlus || E.isInvalid()) return false; Dependent = false; if (auto *DRE = dyn_cast<DeclRefExpr>(E.get())) return !DRE->hasExplicitTemplateArgs(); if (auto *ME = dyn_cast<MemberExpr>(E.get())) return !ME->hasExplicitTemplateArgs(); Dependent = true; if (auto *DSDRE = dyn_cast<DependentScopeDeclRefExpr>(E.get())) return !DSDRE->hasExplicitTemplateArgs(); if (auto *DSME = dyn_cast<CXXDependentScopeMemberExpr>(E.get())) return !DSME->hasExplicitTemplateArgs(); // Any additional cases recognized here should also be handled by // diagnoseExprIntendedAsTemplateName. return false; } void diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName, SourceLocation Less, SourceLocation Greater); Decl *ActOnDeclarator(Scope *S, Declarator &D); NamedDecl *HandleDeclarator(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParameterLists); void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S); bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info); bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC, DeclarationName Name, SourceLocation Loc, bool IsTemplateId); void diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals, SourceLocation FallbackLoc, SourceLocation ConstQualLoc = SourceLocation(), SourceLocation VolatileQualLoc = SourceLocation(), SourceLocation RestrictQualLoc = SourceLocation(), SourceLocation AtomicQualLoc = SourceLocation(), SourceLocation UnalignedQualLoc = SourceLocation()); void diagnosePointerAuthDisabled(SourceLocation loc, SourceRange range); bool checkConstantPointerAuthKey(Expr *keyExpr, unsigned &key); static bool adjustContextForLocalExternDecl(DeclContext *&DC); void DiagnoseFunctionSpecifiers(const DeclSpec &DS); NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D, const LookupResult &R); NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R); void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl, const LookupResult &R); void CheckShadow(Scope *S, VarDecl *D); /// Warn if 'E', which is an expression that is about to be modified, refers /// to a shadowing declaration. 
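///
/// \par Illustrative trigger (a sketch; the corresponding warning group is
/// -Wshadow-field-in-constructor-modified):
/// \code
///   struct S {
///     int n;
///     S(int n) { n = 0; } // modifies the parameter 'n' that shadows the
///                         // field 'S::n'; the user likely meant 'this->n'
///   };
/// \endcode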
void CheckShadowingDeclModification(Expr *E, SourceLocation Loc); void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI); private: /// Map of current shadowing declarations to shadowed declarations. Warn if /// it looks like the user is trying to modify the shadowing declaration. llvm::DenseMap<const NamedDecl *, const NamedDecl *> ShadowingDecls; public: void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange); void handleTagNumbering(const TagDecl *Tag, Scope *TagScope); void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec, TypedefNameDecl *NewTD); void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D); NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC, TypeSourceInfo *TInfo, LookupResult &Previous); NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D, LookupResult &Previous, bool &Redeclaration); NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC, TypeSourceInfo *TInfo, LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists, bool &AddToScope, ArrayRef<BindingDecl *> Bindings = None); NamedDecl * ActOnDecompositionDeclarator(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParamLists); // Returns true if the variable declaration is a redeclaration bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous); void CheckVariableDeclarationType(VarDecl *NewVD); bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit, Expr *Init); void CheckCompleteVariableDeclaration(VarDecl *VD); void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD); void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D); NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC, TypeSourceInfo *TInfo, LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists, bool &AddToScope); bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD); enum class CheckConstexprKind { /// Diagnose issues that are non-constant or that are extensions. Diagnose, /// Identify whether this function satisfies the formal rules for constexpr /// functions in the current language mode (with no extensions).
CheckValid }; bool CheckConstexprFunctionDefinition(const FunctionDecl *FD, CheckConstexprKind Kind); void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD); void FindHiddenVirtualMethods(CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods); void NoteHiddenVirtualMethods(CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods); // Returns true if the function declaration is a redeclaration bool CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD, LookupResult &Previous, bool IsMemberSpecialization); bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl); bool canFullyTypeCheckRedeclaration(ValueDecl *NewD, ValueDecl *OldD, QualType NewT, QualType OldT); void CheckMain(FunctionDecl *FD, const DeclSpec &D); void CheckMSVCRTEntryPoint(FunctionDecl *FD); Attr *getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD, bool IsDefinition); void CheckFunctionOrTemplateParamDeclarator(Scope *S, Declarator &D); Decl *ActOnParamDeclarator(Scope *S, Declarator &D); ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC, SourceLocation Loc, QualType T); QualType adjustParameterTypeForObjCAutoRefCount(QualType T, SourceLocation NameLoc, TypeSourceInfo *TSInfo); ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc, SourceLocation NameLoc, IdentifierInfo *Name, QualType T, TypeSourceInfo *TSInfo, StorageClass SC); void ActOnParamDefaultArgument(Decl *param, SourceLocation EqualLoc, Expr *defarg); void ActOnParamUnparsedDefaultArgument(Decl *param, SourceLocation EqualLoc, SourceLocation ArgLoc); void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc); bool SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg, SourceLocation EqualLoc); // Contexts where using non-trivial C union types can be disallowed. This is // passed to err_non_trivial_c_union_in_invalid_context. enum NonTrivialCUnionContext { // Function parameter. NTCUC_FunctionParam, // Function return. NTCUC_FunctionReturn, // Default-initialized object. NTCUC_DefaultInitializedObject, // Variable with automatic storage duration. NTCUC_AutoVar, // Initializer expression that might copy from another object. NTCUC_CopyInit, // Assignment. NTCUC_Assignment, // Compound literal. NTCUC_CompoundLiteral, // Block capture. NTCUC_BlockCapture, // lvalue-to-rvalue conversion of volatile type. NTCUC_LValueToRValueVolatile, }; /// Emit diagnostics if the initializer or any of its explicit or /// implicitly-generated subexpressions require copying or /// default-initializing a type that is or contains a C union type that is /// non-trivial to copy or default-initialize. void checkNonTrivialCUnionInInitializer(const Expr *Init, SourceLocation Loc); // These flags are passed to checkNonTrivialCUnion. enum NonTrivialCUnionKind { NTCUK_Init = 0x1, NTCUK_Destruct = 0x2, NTCUK_Copy = 0x4, }; /// Emit diagnostics if a non-trivial C union type or a struct that contains /// a non-trivial C union is used in an invalid context. 
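///
/// \par Illustrative source pattern (a sketch; under ARC, an Objective-C
/// pointer member makes a C union non-trivial to copy and destruct):
/// \code
///   union U { id obj; int n; }; // non-trivial C union under ARC
///   void take(union U u);       // diagnosed in the NTCUC_FunctionParam context
/// \endcode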
void checkNonTrivialCUnion(QualType QT, SourceLocation Loc, NonTrivialCUnionContext UseContext, unsigned NonTrivialKind); void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit); void ActOnUninitializedDecl(Decl *dcl); void ActOnInitializerError(Decl *Dcl); void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc); void ActOnCXXForRangeDecl(Decl *D); StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc, IdentifierInfo *Ident, ParsedAttributes &Attrs, SourceLocation AttrEnd); void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc); void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc); void CheckStaticLocalForDllExport(VarDecl *VD); void FinalizeDeclaration(Decl *D); DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS, ArrayRef<Decl *> Group); DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group); /// Should be called on all declarations that might have attached /// documentation comments. void ActOnDocumentableDecl(Decl *D); void ActOnDocumentableDecls(ArrayRef<Decl *> Group); void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D, SourceLocation LocAfterDecls); void CheckForFunctionRedefinition( FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParamLists, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D, SkipBodyInfo *SkipBody = nullptr); void ActOnStartOfObjCMethodDef(Scope *S, Decl *D); bool isObjCMethodDecl(Decl *D) { return D && isa<ObjCMethodDecl>(D); } /// Determine whether we can delay parsing the body of a function or /// function template until it is used, assuming we don't care about emitting /// code for that function. /// /// This will be \c false if we may need the body of the function in the /// middle of parsing an expression (where it's impractical to switch to /// parsing a different function), for instance, if it's constexpr in C++11 /// or has an 'auto' return type in C++14. These cases are essentially bugs. bool canDelayFunctionBody(const Declarator &D); /// Determine whether we can skip parsing the body of a function /// definition, assuming we don't care about analyzing its body or emitting /// code for that function. /// /// This will be \c false only if we may need the body of the function in /// order to parse the rest of the program (for instance, if it is /// \c constexpr in C++11 or has an 'auto' return type in C++14). bool canSkipFunctionBody(Decl *D); void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation); Decl *ActOnSkippedFunctionBody(Decl *Decl); void ActOnFinishInlineFunctionDef(FunctionDecl *D); /// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an /// attribute for which parsing is delayed. void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs); /// Diagnose any unused parameters in the given sequence of /// ParmVarDecl pointers. void DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters); /// Diagnose whether the size of parameters or return value of a /// function or obj-c method definition is pass-by-value and larger than a /// specified threshold. 
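///
/// \par Illustrative trigger (a sketch; the threshold N is supplied by
/// -Wlarge-by-value-copy=N):
/// \code
///   struct Big { char buf[4096]; };
///   Big  produce();      // return value passed by value, larger than N
///   void consume(Big b); // parameter passed by value, larger than N
/// \endcode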
void DiagnoseSizeOfParametersAndReturnValue(ArrayRef<ParmVarDecl *> Parameters, QualType ReturnTy, NamedDecl *D); void DiagnoseInvalidJumps(Stmt *Body); Decl *ActOnFileScopeAsmDecl(Expr *expr, SourceLocation AsmLoc, SourceLocation RParenLoc); /// Handle a C++11 empty-declaration and attribute-declaration. Decl *ActOnEmptyDeclaration(Scope *S, const ParsedAttributesView &AttrList, SourceLocation SemiLoc); enum class ModuleDeclKind { Interface, ///< 'export module X;' Implementation, ///< 'module X;' }; /// The parser has processed a module-declaration that begins the definition /// of a module interface or implementation. DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc, SourceLocation ModuleLoc, ModuleDeclKind MDK, ModuleIdPath Path, bool IsFirstDecl); /// The parser has processed a global-module-fragment declaration that begins /// the definition of the global module fragment of the current module unit. /// \param ModuleLoc The location of the 'module' keyword. DeclGroupPtrTy ActOnGlobalModuleFragmentDecl(SourceLocation ModuleLoc); /// The parser has processed a private-module-fragment declaration that begins /// the definition of the private module fragment of the current module unit. /// \param ModuleLoc The location of the 'module' keyword. /// \param PrivateLoc The location of the 'private' keyword. DeclGroupPtrTy ActOnPrivateModuleFragmentDecl(SourceLocation ModuleLoc, SourceLocation PrivateLoc); /// The parser has processed a module import declaration. /// /// \param StartLoc The location of the first token in the declaration. This /// could be the location of an '@', 'export', or 'import'. /// \param ExportLoc The location of the 'export' keyword, if any. /// \param ImportLoc The location of the 'import' keyword. /// \param Path The module access path. DeclResult ActOnModuleImport(SourceLocation StartLoc, SourceLocation ExportLoc, SourceLocation ImportLoc, ModuleIdPath Path); DeclResult ActOnModuleImport(SourceLocation StartLoc, SourceLocation ExportLoc, SourceLocation ImportLoc, Module *M, ModuleIdPath Path = {}); /// The parser has processed a module import translated from a /// #include or similar preprocessing directive. void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod); void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod); /// The parser has entered a submodule. void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod); /// The parser has left a submodule. void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod); /// Create an implicit import of the given module at the given /// source location, for error recovery, if possible. /// /// This routine is typically used when an entity found by name lookup /// is actually hidden within a module that we know about but the user /// has forgotten to import. void createImplicitModuleImportForErrorRecovery(SourceLocation Loc, Module *Mod); /// Kinds of missing import. Note, the values of these enumerators correspond /// to %select values in diagnostics. enum class MissingImportKind { Declaration, Definition, DefaultArgument, ExplicitSpecialization, PartialSpecialization }; /// Diagnose that the specified declaration needs to be visible but /// isn't, and suggest a module import that would resolve the problem.
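///
/// \par Illustrative scenario (a sketch; the module name 'Lib' is
/// hypothetical):
/// \code
///   // Module 'Lib' declares 'struct Widget', but this TU never imports it:
///   Widget w; // error: declaration of 'Widget' must be imported from
///             // module 'Lib' before it is required
/// \endcode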
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl, MissingImportKind MIK, bool Recover = true); void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl, SourceLocation DeclLoc, ArrayRef<Module *> Modules, MissingImportKind MIK, bool Recover); Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc, SourceLocation LBraceLoc); Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl, SourceLocation RBraceLoc); /// We've found a use of a templated declaration that would trigger an /// implicit instantiation. Check that any relevant explicit specializations /// and partial specializations are visible, and diagnose if not. void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec); /// We've found a use of a template specialization that would select a /// partial specialization. Check that the partial specialization is visible, /// and diagnose if not. void checkPartialSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec); /// Retrieve a suitable printing policy for diagnostics. PrintingPolicy getPrintingPolicy() const { return getPrintingPolicy(Context, PP); } /// Retrieve a suitable printing policy for diagnostics. static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx, const Preprocessor &PP); /// Scope actions. void ActOnPopScope(SourceLocation Loc, Scope *S); void ActOnTranslationUnitScope(Scope *S); Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS, RecordDecl *&AnonRecord); Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS, MultiTemplateParamsArg TemplateParams, bool IsExplicitInstantiation, RecordDecl *&AnonRecord); Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS, AccessSpecifier AS, RecordDecl *Record, const PrintingPolicy &Policy); Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS, RecordDecl *Record); /// Common ways to introduce type names without a tag for use in diagnostics. /// Keep in sync with err_tag_reference_non_tag. enum NonTagKind { NTK_NonStruct, NTK_NonClass, NTK_NonUnion, NTK_NonEnum, NTK_Typedef, NTK_TypeAlias, NTK_Template, NTK_TypeAliasTemplate, NTK_TemplateTemplateArgument, }; /// Given a non-tag type declaration, returns an enum useful for indicating /// what kind of non-tag type this is. 
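///
/// \par Illustrative trigger for err_tag_reference_non_tag (a sketch):
/// \code
///   typedef int Foo;
///   struct Foo *p; // 'Foo' names a typedef, so this routine reports
///                  // NTK_Typedef for the diagnostic's %select
/// \endcode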
NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK); bool isAcceptableTagRedeclaration(const TagDecl *Previous, TagTypeKind NewTag, bool isDefinition, SourceLocation NewTagLoc, const IdentifierInfo *Name); enum TagUseKind { TUK_Reference, // Reference to a tag: 'struct foo *X;' TUK_Declaration, // Fwd decl of a tag: 'struct foo;' TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;' TUK_Friend // Friend declaration: 'friend struct foo;' }; Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, AccessSpecifier AS, SourceLocation ModulePrivateLoc, MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl, bool &IsDependent, SourceLocation ScopedEnumKWLoc, bool ScopedEnumUsesClassTag, TypeResult UnderlyingType, bool IsTypeSpecifier, bool IsTemplateParamOrArg, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc, unsigned TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, MultiTemplateParamsArg TempParamLists); TypeResult ActOnDependentTag(Scope *S, unsigned TagSpec, TagUseKind TUK, const CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation TagLoc, SourceLocation NameLoc); void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart, IdentifierInfo *ClassName, SmallVectorImpl<Decl *> &Decls); Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth); FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, InClassInitStyle InitStyle, AccessSpecifier AS); MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, InClassInitStyle InitStyle, AccessSpecifier AS, const ParsedAttr &MSPropertyAttr); FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T, TypeSourceInfo *TInfo, RecordDecl *Record, SourceLocation Loc, bool Mutable, Expr *BitfieldWidth, InClassInitStyle InitStyle, SourceLocation TSSL, AccessSpecifier AS, NamedDecl *PrevDecl, Declarator *D = nullptr); bool CheckNontrivialField(FieldDecl *FD); void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM); enum TrivialABIHandling { /// The triviality of a method unaffected by "trivial_abi". TAH_IgnoreTrivialABI, /// The triviality of a method affected by "trivial_abi". TAH_ConsiderTrivialABI }; bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM, TrivialABIHandling TAH = TAH_IgnoreTrivialABI, bool Diagnose = false); /// For a defaulted function, the kind of defaulted function that it is. 
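///
/// \par Examples of the two kinds (a sketch):
/// \code
///   struct X {
///     X() = default;                              // a special member
///     bool operator==(const X &) const = default; // a defaulted comparison
///   };                                            // (C++20)
/// \endcode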
class DefaultedFunctionKind { CXXSpecialMember SpecialMember : 8; DefaultedComparisonKind Comparison : 8; public: DefaultedFunctionKind() : SpecialMember(CXXInvalid), Comparison(DefaultedComparisonKind::None) { } DefaultedFunctionKind(CXXSpecialMember CSM) : SpecialMember(CSM), Comparison(DefaultedComparisonKind::None) {} DefaultedFunctionKind(DefaultedComparisonKind Comp) : SpecialMember(CXXInvalid), Comparison(Comp) {} bool isSpecialMember() const { return SpecialMember != CXXInvalid; } bool isComparison() const { return Comparison != DefaultedComparisonKind::None; } explicit operator bool() const { return isSpecialMember() || isComparison(); } CXXSpecialMember asSpecialMember() const { return SpecialMember; } DefaultedComparisonKind asComparison() const { return Comparison; } /// Get the index of this function kind for use in diagnostics. unsigned getDiagnosticIndex() const { static_assert(CXXInvalid > CXXDestructor, "invalid should have highest index"); static_assert((unsigned)DefaultedComparisonKind::None == 0, "none should be equal to zero"); return SpecialMember + (unsigned)Comparison; } }; DefaultedFunctionKind getDefaultedFunctionKind(const FunctionDecl *FD); CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD) { return getDefaultedFunctionKind(MD).asSpecialMember(); } DefaultedComparisonKind getDefaultedComparisonKind(const FunctionDecl *FD) { return getDefaultedFunctionKind(FD).asComparison(); } void ActOnLastBitfield(SourceLocation DeclStart, SmallVectorImpl<Decl *> &AllIvarDecls); Decl *ActOnIvar(Scope *S, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, tok::ObjCKeywordKind visibility); // This is used for both record definitions and ObjC interface declarations. void ActOnFields(Scope *S, SourceLocation RecLoc, Decl *TagDecl, ArrayRef<Decl *> Fields, SourceLocation LBrac, SourceLocation RBrac, const ParsedAttributesView &AttrList); /// ActOnTagStartDefinition - Invoked when we have entered the /// scope of a tag's definition (e.g., for an enumeration, class, /// struct, or union). void ActOnTagStartDefinition(Scope *S, Decl *TagDecl); /// Perform ODR-like check for C/ObjC when merging tag types from modules. /// Differently from C++, actually parse the body and reject / error out /// in case of a structural mismatch. bool ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev, SkipBodyInfo &SkipBody); typedef void *SkippedDefinitionContext; /// Invoked when we enter a tag definition that we're skipping. SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD); Decl *ActOnObjCContainerStartDefinition(Decl *IDecl); /// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a /// C++ record definition's base-specifiers clause and are starting its /// member declarations. void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl, SourceLocation FinalLoc, bool IsFinalSpelledSealed, SourceLocation LBraceLoc); /// ActOnTagFinishDefinition - Invoked once we have finished parsing /// the definition of a tag (enumeration, class, struct, or union). void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl, SourceRange BraceRange); void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context); void ActOnObjCContainerFinishDefinition(); /// Invoked when we must temporarily exit the objective-c container /// scope for parsing/looking-up C constructs. 
/// /// Must be followed by a call to \see ActOnObjCReenterContainerContext void ActOnObjCTemporaryExitContainerContext(DeclContext *DC); void ActOnObjCReenterContainerContext(DeclContext *DC); /// ActOnTagDefinitionError - Invoked when there was an unrecoverable /// error parsing the definition of a tag. void ActOnTagDefinitionError(Scope *S, Decl *TagDecl); EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum, EnumConstantDecl *LastEnumConst, SourceLocation IdLoc, IdentifierInfo *Id, Expr *val); bool CheckEnumUnderlyingType(TypeSourceInfo *TI); bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped, QualType EnumUnderlyingTy, bool IsFixed, const EnumDecl *Prev); /// Determine whether the body of an anonymous enumeration should be skipped. /// \param II The name of the first enumerator. SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II, SourceLocation IILoc); Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant, SourceLocation IdLoc, IdentifierInfo *Id, const ParsedAttributesView &Attrs, SourceLocation EqualLoc, Expr *Val); void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange, Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S, const ParsedAttributesView &Attr); DeclContext *getContainingDC(DeclContext *DC); /// Set the current declaration context until it gets popped. void PushDeclContext(Scope *S, DeclContext *DC); void PopDeclContext(); /// EnterDeclaratorContext - Used when we must lookup names in the context /// of a declarator's nested name specifier. void EnterDeclaratorContext(Scope *S, DeclContext *DC); void ExitDeclaratorContext(Scope *S); /// Push the parameters of D, which must be a function, into scope. void ActOnReenterFunctionContext(Scope* S, Decl* D); void ActOnExitFunctionContext(); DeclContext *getFunctionLevelDeclContext(); /// getCurFunctionDecl - If inside of a function body, this returns a pointer /// to the function decl for the function being parsed. If we're currently /// in a 'block', this returns the containing context. FunctionDecl *getCurFunctionDecl(); /// getCurMethodDecl - If inside of a method body, this returns a pointer to /// the method decl for the method being parsed. If we're currently /// in a 'block', this returns the containing context. ObjCMethodDecl *getCurMethodDecl(); /// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method /// or C function we're in, otherwise return null. If we're currently /// in a 'block', this returns the containing context. NamedDecl *getCurFunctionOrMethodDecl(); /// Add this decl to the scope shadowed decl chains. void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true); /// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true /// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns /// true if 'D' belongs to the given declaration context. /// /// \param AllowInlineNamespace If \c true, allow the declaration to be in the /// enclosing namespace set of the context, rather than contained /// directly within it. bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr, bool AllowInlineNamespace = false); /// Finds the scope corresponding to the given decl context, if it /// happens to be an enclosing scope. Otherwise return NULL. static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC); /// Subroutines of ActOnDeclarator(). 
TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T, TypeSourceInfo *TInfo); bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New); /// Describes the kind of merge to perform for availability /// attributes (including "deprecated", "unavailable", and "availability"). enum AvailabilityMergeKind { /// Don't merge availability attributes at all. AMK_None, /// Merge availability attributes for a redeclaration, which requires /// an exact match. AMK_Redeclaration, /// Merge availability attributes for an override, which requires /// an exact match or a weakening of constraints. AMK_Override, /// Merge availability attributes for an implementation of /// a protocol requirement. AMK_ProtocolImplementation, }; /// Describes the kind of priority given to an availability attribute. /// /// The sum of priorities determines the final priority of the attribute. /// The final priority determines how the attribute will be merged. /// An attribute with a lower priority will always remove higher priority /// attributes for the specified platform when it is being applied. An /// attribute with a higher priority will not be applied if the declaration /// already has an availability attribute with a lower priority for the /// specified platform. The final priority values are not expected to match /// the values in this enumeration, but instead should be treated as a plain /// integer value. This enumeration just names the priority weights that are /// used to calculate that final value. enum AvailabilityPriority : int { /// The availability attribute was specified explicitly next to the /// declaration. AP_Explicit = 0, /// The availability attribute was applied using '#pragma clang attribute'. AP_PragmaClangAttribute = 1, /// The availability attribute for a specific platform was inferred from /// an availability attribute for another platform. AP_InferredFromOtherPlatform = 2 }; /// Attribute merging methods. Return true if a new attribute was added.
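///
/// \par Illustrative availability merge (a sketch of the priority rule
/// documented above):
/// \code
///   // AP_Explicit (0) outranks AP_InferredFromOtherPlatform (2): writing an
///   // explicit iOS attribute prevents one from being inferred from macOS.
///   void f() __attribute__((availability(macos, introduced=10.12),
///                           availability(ios, introduced=11.0)));
/// \endcode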
AvailabilityAttr * mergeAvailabilityAttr(NamedDecl *D, const AttributeCommonInfo &CI, IdentifierInfo *Platform, bool Implicit, VersionTuple Introduced, VersionTuple Deprecated, VersionTuple Obsoleted, bool IsUnavailable, StringRef Message, bool IsStrict, StringRef Replacement, AvailabilityMergeKind AMK, int Priority); TypeVisibilityAttr * mergeTypeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI, TypeVisibilityAttr::VisibilityType Vis); VisibilityAttr *mergeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI, VisibilityAttr::VisibilityType Vis); UuidAttr *mergeUuidAttr(Decl *D, const AttributeCommonInfo &CI, StringRef Uuid); DLLImportAttr *mergeDLLImportAttr(Decl *D, const AttributeCommonInfo &CI); DLLExportAttr *mergeDLLExportAttr(Decl *D, const AttributeCommonInfo &CI); MSInheritanceAttr *mergeMSInheritanceAttr(Decl *D, const AttributeCommonInfo &CI, bool BestCase, MSInheritanceModel Model); FormatAttr *mergeFormatAttr(Decl *D, const AttributeCommonInfo &CI, IdentifierInfo *Format, int FormatIdx, int FirstArg); SectionAttr *mergeSectionAttr(Decl *D, const AttributeCommonInfo &CI, StringRef Name); CodeSegAttr *mergeCodeSegAttr(Decl *D, const AttributeCommonInfo &CI, StringRef Name); AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D, const AttributeCommonInfo &CI, const IdentifierInfo *Ident); MinSizeAttr *mergeMinSizeAttr(Decl *D, const AttributeCommonInfo &CI); NoSpeculativeLoadHardeningAttr * mergeNoSpeculativeLoadHardeningAttr(Decl *D, const NoSpeculativeLoadHardeningAttr &AL); SpeculativeLoadHardeningAttr * mergeSpeculativeLoadHardeningAttr(Decl *D, const SpeculativeLoadHardeningAttr &AL); OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D, const AttributeCommonInfo &CI); SwiftNameAttr *mergeSwiftNameAttr(Decl *D, const AttributeCommonInfo &CI, StringRef Name, bool Override); InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const ParsedAttr &AL); InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const InternalLinkageAttr &AL); CommonAttr *mergeCommonAttr(Decl *D, const ParsedAttr &AL); CommonAttr *mergeCommonAttr(Decl *D, const CommonAttr &AL); void mergeDeclAttributes(NamedDecl *New, Decl *Old, AvailabilityMergeKind AMK = AMK_Redeclaration); void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New, LookupResult &OldDecls); bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S, bool MergeTypeWithOld); bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old, Scope *S, bool MergeTypeWithOld); void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old); void MergeVarDecl(VarDecl *New, LookupResult &Previous); void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld); void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old); bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn); void notePreviousDefinition(const NamedDecl *Old, SourceLocation New); bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S); // AssignmentAction - This is used by all the assignment diagnostic functions // to represent what is actually causing the operation enum AssignmentAction { AA_Assigning, AA_Passing, AA_Returning, AA_Converting, AA_Initializing, AA_Sending, AA_Casting, AA_Passing_CFAudited }; /// C++ Overloading. enum OverloadKind { /// This is a legitimate overload: the existing declarations are /// functions or function templates with different signatures. Ovl_Overload, /// This is not an overload because the signature exactly matches /// an existing declaration. 
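///
/// \par Illustrative redeclaration (a sketch):
/// \code
///   void g(int);
///   void g(int); // same signature as the previous 'g': Ovl_Match, a
///                // redeclaration rather than a new overload
/// \endcode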
Ovl_Match, /// This is not an overload because the lookup results contain a /// non-function. Ovl_NonFunction }; OverloadKind CheckOverload(Scope *S, FunctionDecl *New, const LookupResult &OldDecls, NamedDecl *&OldDecl, bool IsForUsingDecl); bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl, bool ConsiderCudaAttrs = true); ImplicitConversionSequence TryImplicitConversion(Expr *From, QualType ToType, bool SuppressUserConversions, bool AllowExplicit, bool InOverloadResolution, bool CStyle, bool AllowObjCWritebackConversion); bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType); bool IsFloatingPointPromotion(QualType FromType, QualType ToType); bool IsComplexPromotion(QualType FromType, QualType ToType); bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCWritebackConversion(QualType FromType, QualType ToType, QualType &ConvertedType); bool IsBlockPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType); bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType, const FunctionProtoType *NewType, unsigned *ArgPos = nullptr); void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag, QualType FromType, QualType ToType); void maybeExtendBlockObject(ExprResult &E); CastKind PrepareCastToObjCObjectPointer(ExprResult &E); bool CheckPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath& BasePath, bool IgnoreBaseAccess, bool Diagnose = true); bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType &ConvertedType); bool CheckMemberPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath &BasePath, bool IgnoreBaseAccess); bool IsQualificationConversion(QualType FromType, QualType ToType, bool CStyle, bool &ObjCLifetimeConversion); bool IsFunctionConversion(QualType FromType, QualType ToType, QualType &ResultTy); bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType); bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg); ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity, const VarDecl *NRVOCandidate, QualType ResultType, Expr *Value, bool AllowNRVO = true); bool CanPerformAggregateInitializationForOverloadResolution( const InitializedEntity &Entity, InitListExpr *From); bool CanPerformCopyInitialization(const InitializedEntity &Entity, ExprResult Init); ExprResult PerformCopyInitialization(const InitializedEntity &Entity, SourceLocation EqualLoc, ExprResult Init, bool TopLevelOfInitList = false, bool AllowExplicit = false); ExprResult PerformObjectArgumentInitialization(Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl, CXXMethodDecl *Method); /// Check that the lifetime of the initializer (and its subobjects) is /// sufficient for initializing the entity, and perform lifetime extension /// (when permitted) if not. void checkInitializerLifetime(const InitializedEntity &Entity, Expr *Init); ExprResult PerformContextuallyConvertToBool(Expr *From); ExprResult PerformContextuallyConvertToObjCPointer(Expr *From); /// Contexts in which a converted constant expression is required. enum CCEKind { CCEK_CaseValue, ///< Expression in a case label. CCEK_Enumerator, ///< Enumerator value with fixed underlying type. 
CCEK_TemplateArg, ///< Value of a non-type template parameter. CCEK_NewExpr, ///< Constant expression in a noptr-new-declarator. CCEK_ConstexprIf, ///< Condition in a constexpr if statement. CCEK_ExplicitBool ///< Condition in an explicit(bool) specifier. }; ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, llvm::APSInt &Value, CCEKind CCE); ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, APValue &Value, CCEKind CCE); /// Abstract base class used to perform a contextual implicit /// conversion from an expression to any type passing a filter. class ContextualImplicitConverter { public: bool Suppress; bool SuppressConversion; ContextualImplicitConverter(bool Suppress = false, bool SuppressConversion = false) : Suppress(Suppress), SuppressConversion(SuppressConversion) {} /// Determine whether the specified type is a valid destination type /// for this conversion. virtual bool match(QualType T) = 0; /// Emits a diagnostic complaining that the expression does not have /// integral or enumeration type. virtual SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a diagnostic when the expression has incomplete class type. virtual SemaDiagnosticBuilder diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a diagnostic when the only matching conversion function /// is explicit. virtual SemaDiagnosticBuilder diagnoseExplicitConv( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0; /// Emits a note for the explicit conversion function. virtual SemaDiagnosticBuilder noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0; /// Emits a diagnostic when there are multiple possible conversion /// functions. virtual SemaDiagnosticBuilder diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a note for one of the candidate conversions. virtual SemaDiagnosticBuilder noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0; /// Emits a diagnostic when we picked a conversion function /// (for cases when we are not allowed to pick a conversion function). virtual SemaDiagnosticBuilder diagnoseConversion( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0; virtual ~ContextualImplicitConverter() {} }; class ICEConvertDiagnoser : public ContextualImplicitConverter { bool AllowScopedEnumerations; public: ICEConvertDiagnoser(bool AllowScopedEnumerations, bool Suppress, bool SuppressConversion) : ContextualImplicitConverter(Suppress, SuppressConversion), AllowScopedEnumerations(AllowScopedEnumerations) {} /// Match an integral or (possibly scoped) enumeration type. bool match(QualType T) override; SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override { return diagnoseNotInt(S, Loc, T); } /// Emits a diagnostic complaining that the expression does not have /// integral or enumeration type. virtual SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0; }; /// Perform a contextual implicit conversion. ExprResult PerformContextualImplicitConversion( SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter); enum ObjCSubscriptKind { OS_Array, OS_Dictionary, OS_Error }; ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE); // Note that LK_String is intentionally after the other literals, as // this is used for diagnostics logic. 
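// Illustrative mapping from Objective-C literal syntax to kinds (a sketch,
// not part of the original header):
//   @[ x, y ]  -> LK_Array
//   @{ k : v } -> LK_Dictionary
//   @42        -> LK_Numeric
//   @(expr)    -> LK_Boxed
//   @"text"    -> LK_String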
enum ObjCLiteralKind { LK_Array, LK_Dictionary, LK_Numeric, LK_Boxed, LK_String, LK_Block, LK_None }; ObjCLiteralKind CheckLiteralKind(Expr *FromE); ExprResult PerformObjectMemberConversion(Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl, NamedDecl *Member); // Members have to be NamespaceDecl* or TranslationUnitDecl*. // TODO: make this a typesafe union. typedef llvm::SmallSetVector<DeclContext *, 16> AssociatedNamespaceSet; typedef llvm::SmallSetVector<CXXRecordDecl *, 16> AssociatedClassSet; using ADLCallKind = CallExpr::ADLCallKind; void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, bool AllowExplicit = true, bool AllowExplicitConversion = false, ADLCallKind IsADLCandidate = ADLCallKind::NotADL, ConversionSequenceList EarlyConversions = None, OverloadCandidateParamOrder PO = {}); void AddFunctionCandidates(const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, bool SuppressUserConversions = false, bool PartialOverloading = false, bool FirstArgumentIsBase = false); void AddMethodCandidate(DeclAccessPair FoundDecl, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversion = false, OverloadCandidateParamOrder PO = {}); void AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, ConversionSequenceList EarlyConversions = None, OverloadCandidateParamOrder PO = {}); void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, OverloadCandidateParamOrder PO = {}); void AddTemplateOverloadCandidate( FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, bool AllowExplicit = true, ADLCallKind IsADLCandidate = ADLCallKind::NotADL, OverloadCandidateParamOrder PO = {}); bool CheckNonDependentConversions( FunctionTemplateDecl *FunctionTemplate, ArrayRef<QualType> ParamTypes, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, ConversionSequenceList &Conversions, bool SuppressUserConversions, CXXRecordDecl *ActingContext = nullptr, QualType ObjectType = QualType(), Expr::Classification ObjectClassification = {}, OverloadCandidateParamOrder PO = {}); void AddConversionCandidate( CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit, bool AllowExplicit, bool AllowResultConversion = true); void AddTemplateConversionCandidate( FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, OverloadCandidateSet &CandidateSet, bool
AllowObjCConversionOnExplicit, bool AllowExplicit, bool AllowResultConversion = true); void AddSurrogateCandidate(CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, const FunctionProtoType *Proto, Expr *Object, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddNonMemberOperatorCandidates( const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr); void AddMemberOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, OverloadCandidateParamOrder PO = {}); void AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool IsAssignmentOperator = false, unsigned NumContextualBoolArguments = 0); void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddArgumentDependentLookupCandidates(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, TemplateArgumentListInfo *ExplicitTemplateArgs, OverloadCandidateSet& CandidateSet, bool PartialOverloading = false); // Emit as a 'note' the specific overload candidate void NoteOverloadCandidate( NamedDecl *Found, FunctionDecl *Fn, OverloadCandidateRewriteKind RewriteKind = OverloadCandidateRewriteKind(), QualType DestType = QualType(), bool TakingAddress = false); // Emit as a series of 'note's all template and non-templates identified by // the expression Expr void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(), bool TakingAddress = false); /// Check the enable_if expressions on the given function. Returns the first /// failing attribute, or NULL if they were all successful. EnableIfAttr *CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args, bool MissingImplicitThis = false); /// Find the failed Boolean condition within a given Boolean /// constant expression, and describe it with a string. std::pair<Expr *, std::string> findFailedBooleanCondition(Expr *Cond); /// Emit diagnostics for the diagnose_if attributes on Function, ignoring any /// non-ArgDependent DiagnoseIfAttrs. /// /// Argument-dependent diagnose_if attributes should be checked each time a /// function is used as a direct callee of a function call. /// /// Returns true if any errors were emitted. bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function, const Expr *ThisArg, ArrayRef<const Expr *> Args, SourceLocation Loc); /// Emit diagnostics for the diagnose_if attributes on Function, ignoring any /// ArgDependent DiagnoseIfAttrs. /// /// Argument-independent diagnose_if attributes should be checked on every use /// of a function. /// /// Returns true if any errors were emitted. bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND, SourceLocation Loc); /// Returns whether the given function's address can be taken or not, /// optionally emitting a diagnostic if the address can't be taken. /// /// Returns false if taking the address of the function is illegal. 
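///
/// \par Illustrative case (a sketch; a non-tautological enable_if condition
/// makes the address untakeable):
/// \code
///   void f(int n) __attribute__((enable_if(n > 0, "n must be positive")));
///   void (*fp)(int) = f; // error: the address of 'f' cannot be taken
/// \endcode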
bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function, bool Complain = false, SourceLocation Loc = SourceLocation()); // [PossiblyAFunctionType] --> [Return] // NonFunctionType --> NonFunctionType // R (A) --> R(A) // R (*)(A) --> R (A) // R (&)(A) --> R (A) // R (S::*)(A) --> R (A) QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType); FunctionDecl * ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr, QualType TargetType, bool Complain, DeclAccessPair &Found, bool *pHadMultipleCandidates = nullptr); FunctionDecl * resolveAddressOfOnlyViableOverloadCandidate(Expr *E, DeclAccessPair &FoundResult); bool resolveAndFixAddressOfOnlyViableOverloadCandidate( ExprResult &SrcExpr, bool DoFunctionPointerConversion = false); FunctionDecl * ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl, bool Complain = false, DeclAccessPair *Found = nullptr); bool ResolveAndFixSingleFunctionTemplateSpecialization( ExprResult &SrcExpr, bool DoFunctionPointerConversion = false, bool Complain = false, SourceRange OpRangeForComplaining = SourceRange(), QualType DestTypeForComplaining = QualType(), unsigned DiagIDForComplaining = 0); Expr *FixOverloadedFunctionReference(Expr *E, DeclAccessPair FoundDecl, FunctionDecl *Fn); ExprResult FixOverloadedFunctionReference(ExprResult, DeclAccessPair FoundDecl, FunctionDecl *Fn); void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool PartialOverloading = false); // An enum used to represent the different possible results of building a // range-based for loop. enum ForRangeStatus { FRS_Success, FRS_NoViableFunction, FRS_DiagnosticIssued }; ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc, SourceLocation RangeLoc, const DeclarationNameInfo &NameInfo, LookupResult &MemberLookup, OverloadCandidateSet *CandidateSet, Expr *Range, ExprResult *CallExpr); ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc, Expr *ExecConfig, bool AllowTypoCorrection=true, bool CalleesAddressIsTaken=false); bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, MultiExprArg Args, SourceLocation RParenLoc, OverloadCandidateSet *CandidateSet, ExprResult *Result); ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, const UnresolvedSetImpl &Fns, Expr *input, bool RequiresADL = true); void LookupOverloadedBinOp(OverloadCandidateSet &CandidateSet, OverloadedOperatorKind Op, const UnresolvedSetImpl &Fns, ArrayRef<Expr *> Args, bool RequiresADL = true); ExprResult CreateOverloadedBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc, const UnresolvedSetImpl &Fns, Expr *LHS, Expr *RHS, bool RequiresADL = true, bool AllowRewrittenCandidates = true, FunctionDecl *DefaultedFn = nullptr); ExprResult BuildSynthesizedThreeWayComparison(SourceLocation OpLoc, const UnresolvedSetImpl &Fns, Expr *LHS, Expr *RHS, FunctionDecl *DefaultedFn); ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc, SourceLocation RLoc, Expr *Base,Expr *Idx); ExprResult BuildCallToMemberFunction(Scope *S, Expr *MemExpr, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc); ExprResult BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc); ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base, SourceLocation OpLoc, bool *NoArrowOperatorFound = nullptr); /// CheckCallReturnType -
Checks that a call expression's return type is /// complete. Returns true on failure. The location passed in is the location /// that best represents the call. bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc, CallExpr *CE, FunctionDecl *FD); /// Helpers for dealing with blocks and functions. bool CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters, bool CheckParameterNames); void CheckCXXDefaultArguments(FunctionDecl *FD); void CheckExtraCXXDefaultArguments(Declarator &D); Scope *getNonFieldDeclScope(Scope *S); /// \name Name lookup /// /// These routines provide name lookup that is used during semantic /// analysis to resolve the various kinds of names (identifiers, /// overloaded operator names, constructor names, etc.) into zero or /// more declarations within a particular scope. The major entry /// points are LookupName, which performs unqualified name lookup, /// and LookupQualifiedName, which performs qualified name lookup. /// /// All name lookup is performed based on some specific criteria, /// which specify what names will be visible to name lookup and how /// far name lookup should work. These criteria are important both /// for capturing language semantics (certain lookups will ignore /// certain names, for example) and for performance, since name /// lookup is often a bottleneck in the compilation of C++. Name /// lookup criteria is specified via the LookupCriteria enumeration. /// /// The results of name lookup can vary based on the kind of name /// lookup performed, the current language, and the translation /// unit. In C, for example, name lookup will either return nothing /// (no entity found) or a single declaration. In C++, name lookup /// can additionally refer to a set of overloaded functions or /// result in an ambiguity. All of the possible results of name /// lookup are captured by the LookupResult class, which provides /// the ability to distinguish among them. //@{ /// Describes the kind of name lookup to perform. enum LookupNameKind { /// Ordinary name lookup, which finds ordinary names (functions, /// variables, typedefs, etc.) in C and most kinds of names /// (functions, variables, members, types, etc.) in C++. LookupOrdinaryName = 0, /// Tag name lookup, which finds the names of enums, classes, /// structs, and unions. LookupTagName, /// Label name lookup. LookupLabel, /// Member name lookup, which finds the names of /// class/struct/union members. LookupMemberName, /// Look up of an operator name (e.g., operator+) for use with /// operator overloading. This lookup is similar to ordinary name /// lookup, but will ignore any declarations that are class members. LookupOperatorName, /// Look up of a name that precedes the '::' scope resolution /// operator in C++. This lookup completely ignores operator, object, /// function, and enumerator names (C++ [basic.lookup.qual]p1). LookupNestedNameSpecifierName, /// Look up a namespace name within a C++ using directive or /// namespace alias definition, ignoring non-namespace names (C++ /// [basic.lookup.udir]p1). LookupNamespaceName, /// Look up all declarations in a scope with the given name, /// including resolved using declarations. This is appropriate /// for checking redeclarations for a using declaration. LookupUsingDeclName, /// Look up an ordinary name that is going to be redeclared as a /// name with linkage. This lookup ignores any declarations that /// are outside of the current scope unless they have linkage. See /// C99 6.2.2p4-5 and C++ [basic.link]p6. 
LookupRedeclarationWithLinkage, /// Look up a friend of a local class. This lookup does not look /// outside the innermost non-class scope. See C++11 [class.friend]p11. LookupLocalFriendName, /// Look up the name of an Objective-C protocol. LookupObjCProtocolName, /// Look up implicit 'self' parameter of an objective-c method. LookupObjCImplicitSelfParam, /// Look up the name of an OpenMP user-defined reduction operation. LookupOMPReductionName, /// Look up the name of an OpenMP user-defined mapper. LookupOMPMapperName, /// Look up any declaration with any name. LookupAnyName }; /// Specifies whether (or how) name lookup is being performed for a /// redeclaration (vs. a reference). enum RedeclarationKind { /// The lookup is a reference to this name that is not for the /// purpose of redeclaring the name. NotForRedeclaration = 0, /// The lookup results will be used for redeclaration of a name, /// if an entity by that name already exists and is visible. ForVisibleRedeclaration, /// The lookup results will be used for redeclaration of a name /// with external linkage; non-visible lookup results with external linkage /// may also be found. ForExternalRedeclaration }; RedeclarationKind forRedeclarationInCurContext() { // A declaration with an owning module for linkage can never link against // anything that is not visible. We don't need to check linkage here; if // the context has internal linkage, redeclaration lookup won't find things // from other TUs, and we can't safely compute linkage yet in general. if (cast<Decl>(CurContext) ->getOwningModuleForLinkage(/*IgnoreLinkage*/true)) return ForVisibleRedeclaration; return ForExternalRedeclaration; } /// The possible outcomes of name lookup for a literal operator. enum LiteralOperatorLookupResult { /// The lookup resulted in an error. LOLR_Error, /// The lookup found no match but no diagnostic was issued. LOLR_ErrorNoDiagnostic, /// The lookup found a single 'cooked' literal operator, which /// expects a normal literal to be built and passed to it. LOLR_Cooked, /// The lookup found a single 'raw' literal operator, which expects /// a string literal containing the spelling of the literal token. LOLR_Raw, /// The lookup found an overload set of literal operator templates, /// which expect the characters of the spelling of the literal token to be /// passed as a non-type template argument pack. LOLR_Template, /// The lookup found an overload set of literal operator templates, /// which expect the character type and characters of the spelling of the /// string literal token to be passed as template arguments. LOLR_StringTemplate }; SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D, CXXSpecialMember SM, bool ConstArg, bool VolatileArg, bool RValueThis, bool ConstThis, bool VolatileThis); typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator; typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)> TypoRecoveryCallback; private: bool CppLookupName(LookupResult &R, Scope *S); struct TypoExprState { std::unique_ptr<TypoCorrectionConsumer> Consumer; TypoDiagnosticGenerator DiagHandler; TypoRecoveryCallback RecoveryHandler; TypoExprState(); TypoExprState(TypoExprState &&other) noexcept; TypoExprState &operator=(TypoExprState &&other) noexcept; }; /// The set of unhandled TypoExprs and their associated state. llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos; /// Creates a new TypoExpr AST node. 
TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC, TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC); // The set of known/encountered (unique, canonicalized) NamespaceDecls. // // The boolean value will be true to indicate that the namespace was loaded // from an AST/PCH file, or false otherwise. llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces; /// Whether we have already loaded known namespaces from an external /// source. bool LoadedExternalKnownNamespaces; /// Helper for CorrectTypo and CorrectTypoDelayed used to create and /// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction /// should be skipped entirely. std::unique_ptr<TypoCorrectionConsumer> makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, DeclContext *MemberContext, bool EnteringContext, const ObjCObjectPointerType *OPT, bool ErrorRecovery); public: const TypoExprState &getTypoExprState(TypoExpr *TE) const; /// Clears the state of the given TypoExpr. void clearDelayedTypo(TypoExpr *TE); /// Look up a name, looking for a single declaration. Return /// null if the results were absent, ambiguous, or overloaded. /// /// It is preferable to use the elaborated form and explicitly handle /// ambiguity and overloading. NamedDecl *LookupSingleName(Scope *S, DeclarationName Name, SourceLocation Loc, LookupNameKind NameKind, RedeclarationKind Redecl = NotForRedeclaration); bool LookupBuiltin(LookupResult &R); bool LookupName(LookupResult &R, Scope *S, bool AllowBuiltinCreation = false); bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, bool InUnqualifiedLookup = false); bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, CXXScopeSpec &SS); bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS, bool AllowBuiltinCreation = false, bool EnteringContext = false); ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc, RedeclarationKind Redecl = NotForRedeclaration); bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class); void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S, QualType T1, QualType T2, UnresolvedSetImpl &Functions); LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc, SourceLocation GnuLabelLoc = SourceLocation()); DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class); CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class); CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class, unsigned Quals); CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals, bool RValueThis, unsigned ThisQuals); CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class, unsigned Quals); CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals, bool RValueThis, unsigned ThisQuals); CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class); bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id); LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R, ArrayRef<QualType> ArgTys, bool AllowRaw, bool AllowTemplate, bool AllowStringTemplate, bool DiagnoseMissing); bool isKnownName(StringRef name); /// Status of the function emission on the CUDA/HIP/OpenMP host/device attrs.
enum class FunctionEmissionStatus { Emitted, CUDADiscarded, // Discarded due to CUDA/HIP hostness OMPDiscarded, // Discarded due to OpenMP hostness TemplateDiscarded, // Discarded due to uninstantiated templates Unknown, }; FunctionEmissionStatus getEmissionStatus(FunctionDecl *Decl); // Whether the callee should be ignored in CUDA/HIP/OpenMP host/device check. bool shouldIgnoreInHostDeviceCheck(FunctionDecl *Callee); void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, ADLResult &Functions); void LookupVisibleDecls(Scope *S, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true, bool LoadExternal = true); void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true, bool IncludeDependentBases = false, bool LoadExternal = true); enum CorrectTypoKind { CTK_NonError, // CorrectTypo used in a non error recovery situation. CTK_ErrorRecovery // CorrectTypo used in normal error recovery. }; TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr, bool RecordFailure = true); TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr); /// Process any TypoExprs in the given Expr and its children, /// generating diagnostics as appropriate and returning a new Expr if there /// were typos that were all successfully corrected and ExprError if one or /// more typos could not be corrected. /// /// \param E The Expr to check for TypoExprs. /// /// \param InitDecl A VarDecl to avoid because the Expr being corrected is its /// initializer. /// /// \param Filter A function applied to a newly rebuilt Expr to determine if /// it is an acceptable/usable result from a single combination of typo /// corrections. As long as the filter returns ExprError, different /// combinations of corrections will be tried until all are exhausted. ExprResult CorrectDelayedTyposInExpr(Expr *E, VarDecl *InitDecl = nullptr, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }); ExprResult CorrectDelayedTyposInExpr(Expr *E, llvm::function_ref<ExprResult(Expr *)> Filter) { return CorrectDelayedTyposInExpr(E, nullptr, Filter); } ExprResult CorrectDelayedTyposInExpr(ExprResult ER, VarDecl *InitDecl = nullptr, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }) { return ER.isInvalid() ? 
ER : CorrectDelayedTyposInExpr(ER.get(), Filter); } ExprResult CorrectDelayedTyposInExpr(ExprResult ER, llvm::function_ref<ExprResult(Expr *)> Filter) { return CorrectDelayedTyposInExpr(ER, nullptr, Filter); } void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, bool ErrorRecovery = true); void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, const PartialDiagnostic &PrevNote, bool ErrorRecovery = true); void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F); void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc, ArrayRef<Expr *> Args, AssociatedNamespaceSet &AssociatedNamespaces, AssociatedClassSet &AssociatedClasses); void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S, bool ConsiderLinkage, bool AllowInlineNamespace); bool CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old); void DiagnoseAmbiguousLookup(LookupResult &Result); //@} ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id, SourceLocation IdLoc, bool TypoCorrection = false); NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID, Scope *S, bool ForRedeclaration, SourceLocation Loc); NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II, Scope *S); void AddKnownFunctionAttributes(FunctionDecl *FD); // More parsing and symbol table subroutines. void ProcessPragmaWeak(Scope *S, Decl *D); // Decl attributes - this routine is the top level dispatcher. void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD); // Helper for delayed processing of attributes. void ProcessDeclAttributeDelayed(Decl *D, const ParsedAttributesView &AttrList); void ProcessDeclAttributeList(Scope *S, Decl *D, const ParsedAttributesView &AL, bool IncludeCXX11Attributes = true); bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl, const ParsedAttributesView &AttrList); void checkUnusedDeclAttributes(Declarator &D); /// Map any API notes provided for this declaration to attributes on the /// declaration. /// /// Triggered by declaration-attribute processing. void ProcessAPINotes(Decl *D); /// Determine if type T is a valid subject for a nonnull and similar /// attributes. By default, we look through references (the behavior used by /// nonnull), but if the second parameter is true, then we treat a reference /// type as valid. bool isValidPointerAttrType(QualType T, bool RefOkay = false); bool CheckRegparmAttr(const ParsedAttr &attr, unsigned &value); bool CheckCallingConvAttr(const ParsedAttr &attr, CallingConv &CC, const FunctionDecl *FD = nullptr); bool CheckAttrTarget(const ParsedAttr &CurrAttr); bool CheckAttrNoArgs(const ParsedAttr &CurrAttr); bool checkStringLiteralArgumentAttr(const ParsedAttr &Attr, unsigned ArgNum, StringRef &Str, SourceLocation *ArgLocation = nullptr); bool checkSectionName(SourceLocation LiteralLoc, StringRef Str); bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str); bool checkMSInheritanceAttrOnDefinition( CXXRecordDecl *RD, SourceRange Range, bool BestCase, MSInheritanceModel SemanticSpelling); void CheckAlignasUnderalignment(Decl *D); /// Adjust the calling convention of a method to be the ABI default if it /// wasn't specified explicitly. This handles method types formed from /// function type typedefs and typename template arguments. void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor, SourceLocation Loc); // Check if there is an explicit attribute, but only look through parens. 
// The intent is to look for an attribute on the current declarator, but not // one that came from a typedef. bool hasExplicitCallingConv(QualType T); /// Get the outermost AttributedType node that sets a calling convention. /// Valid types should not have multiple attributes with different CCs. const AttributedType *getCallingConvAttributedType(QualType T) const; /// Check whether a nullability type specifier can be added to the given /// type through some means not written in source (e.g. API notes). /// /// \param type The type to which the nullability specifier will be /// added. On success, this type will be updated appropriately. /// /// \param nullability The nullability specifier to add. /// /// \param diagLoc The location to use for diagnostics. /// /// \param allowArrayTypes Whether to accept nullability specifiers on an /// array type (e.g., because it will decay to a pointer). /// /// \param overrideExisting Whether to override an existing, locally-specified /// nullability specifier rather than complaining about the conflict. /// /// \returns true if nullability cannot be applied, false otherwise. bool checkImplicitNullabilityTypeSpecifier(QualType &type, NullabilityKind nullability, SourceLocation diagLoc, bool allowArrayTypes, bool overrideExisting); /// Stmt attributes - this routine is the top level dispatcher. StmtResult ProcessStmtAttributes(Stmt *Stmt, const ParsedAttributesView &Attrs, SourceRange Range); void WarnConflictingTypedMethods(ObjCMethodDecl *Method, ObjCMethodDecl *MethodDecl, bool IsProtocolMethodDecl); void CheckConflictingOverridingMethod(ObjCMethodDecl *Method, ObjCMethodDecl *Overridden, bool IsProtocolMethodDecl); /// WarnExactTypedMethods - This routine issues a warning if a method /// implementation declaration matches exactly that of its declaration. void WarnExactTypedMethods(ObjCMethodDecl *Method, ObjCMethodDecl *MethodDecl, bool IsProtocolMethodDecl); typedef llvm::SmallPtrSet<Selector, 8> SelectorSet; /// CheckImplementationIvars - This routine checks if the instance variables /// listed in the implementation match those listed in the interface. void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl, ObjCIvarDecl **Fields, unsigned nIvars, SourceLocation Loc); /// ImplMethodsVsClassMethods - This is the main routine to warn if any method /// remains unimplemented in the class or category \@implementation. void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl, ObjCContainerDecl* IDecl, bool IncompleteImpl = false); /// DiagnoseUnimplementedProperties - This routine warns on those properties /// which must be implemented by this implementation. void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl, ObjCContainerDecl *CDecl, bool SynthesizeProperties); /// Diagnose any null-resettable synthesized setters. void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl); /// DefaultSynthesizeProperties - This routine default synthesizes all /// properties which must be synthesized in the class's \@implementation. void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl, ObjCInterfaceDecl *IDecl, SourceLocation AtEnd); void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd); /// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is /// an ivar synthesized for 'Method' and 'Method' is a property accessor /// declared in class 'IFace'.
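/// For example, with a default-synthesized property 'count', the synthesized
/// ivar '_count' backs the synthesized accessors '-count' and '-setCount:'
/// (the property name here is illustrative).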
bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace, ObjCMethodDecl *Method, ObjCIvarDecl *IV); /// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if the ivar /// which backs the property is not used in the property's accessor. void DiagnoseUnusedBackingIvarInAccessor(Scope *S, const ObjCImplementationDecl *ImplD); /// GetIvarBackingPropertyAccessor - If a method is a property setter/getter and /// its property has a backing ivar, returns this ivar; otherwise, returns NULL. /// It also returns the ivar's property on success. ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method, const ObjCPropertyDecl *&PDecl) const; /// Called by ActOnProperty to handle \@property declarations in /// class extensions. ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, Selector GetterSel, SourceLocation GetterNameLoc, Selector SetterSel, SourceLocation SetterNameLoc, const bool isReadWrite, unsigned &Attributes, const unsigned AttributesAsWritten, QualType T, TypeSourceInfo *TSI, tok::ObjCKeywordKind MethodImplKind); /// Called by ActOnProperty and HandlePropertyInClassExtension to /// handle creating the ObjcPropertyDecl for a category or \@interface. ObjCPropertyDecl *CreatePropertyDecl(Scope *S, ObjCContainerDecl *CDecl, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, Selector GetterSel, SourceLocation GetterNameLoc, Selector SetterSel, SourceLocation SetterNameLoc, const bool isReadWrite, const unsigned Attributes, const unsigned AttributesAsWritten, QualType T, TypeSourceInfo *TSI, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC = nullptr); /// AtomicPropertySetterGetterRules - This routine enforces the rule (via /// warning) when an atomic property has one but not the other user-declared /// setter or getter. void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl, ObjCInterfaceDecl* IDecl); void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D); void DiagnoseMissingDesignatedInitOverrides( const ObjCImplementationDecl *ImplD, const ObjCInterfaceDecl *IFD); void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID); enum MethodMatchStrategy { MMS_loose, MMS_strict }; /// MatchTwoMethodDeclarations - Checks if two methods' types match and returns /// true, or false, accordingly. bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method, const ObjCMethodDecl *PrevMethod, MethodMatchStrategy strategy = MMS_strict); /// MatchAllMethodDeclarations - Check methods declared in an interface or /// protocol against those declared in their implementations. void MatchAllMethodDeclarations(const SelectorSet &InsMap, const SelectorSet &ClsMap, SelectorSet &InsMapSeen, SelectorSet &ClsMapSeen, ObjCImplDecl* IMPDecl, ObjCContainerDecl* IDecl, bool &IncompleteImpl, bool ImmediateClass, bool WarnCategoryMethodImpl=false); /// CheckCategoryVsClassMethodMatches - Checks that methods implemented in /// a category match those implemented in its primary class and /// warns each time an exact match is found. void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP); /// Add the given method to the list of globally-known methods. void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method); /// Returns default addr space for method qualifiers. LangAS getDefaultCXXMethodAddrSpace() const; private: /// AddMethodToGlobalPool - Add an instance or factory method to the global /// pool.
See description of AddInstanceMethodToGlobalPool. void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance); /// LookupMethodInGlobalPool - Returns the instance or factory method and /// optionally warns if there are multiple signatures. ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R, bool receiverIdOrClass, bool instance); public: /// - Returns instance or factory methods in the global method pool for the /// given selector. It checks the desired kind first; if none is found and /// the parameter CheckTheOther is set, it then checks the other kind. If no /// such method or only one method is found, the function returns false; /// otherwise, it returns true. bool CollectMultipleMethodsInGlobalPool(Selector Sel, SmallVectorImpl<ObjCMethodDecl*>& Methods, bool InstanceFirst, bool CheckTheOther, const ObjCObjectType *TypeBound = nullptr); bool AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod, SourceRange R, bool receiverIdOrClass, SmallVectorImpl<ObjCMethodDecl*>& Methods); void DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods, Selector Sel, SourceRange R, bool receiverIdOrClass); private: /// - Returns a selector which best matches the given argument list, or /// nullptr if none could be found. ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args, bool IsInstance, SmallVectorImpl<ObjCMethodDecl*>& Methods); /// Record the typo correction failure and return an empty correction. TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc, bool RecordFailure = true) { if (RecordFailure) TypoCorrectionFailures[Typo].insert(TypoLoc); return TypoCorrection(); } public: /// AddInstanceMethodToGlobalPool - All instance methods in a translation /// unit are added to a global pool. This allows us to efficiently associate /// a selector with a method declaration for purposes of typechecking /// messages sent to "id" (where the class of the object is unknown). void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) { AddMethodToGlobalPool(Method, impl, /*instance*/true); } /// AddFactoryMethodToGlobalPool - Same as above, but for factory methods. void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) { AddMethodToGlobalPool(Method, impl, /*instance*/false); } /// AddAnyMethodToGlobalPool - Add any method, instance or factory to global /// pool. void AddAnyMethodToGlobalPool(Decl *D); /// LookupInstanceMethodInGlobalPool - Returns the method and warns if /// there are multiple signatures. ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R, bool receiverIdOrClass=false) { return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass, /*instance*/true); } /// LookupFactoryMethodInGlobalPool - Returns the method and warns if /// there are multiple signatures. ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R, bool receiverIdOrClass=false) { return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass, /*instance*/false); } const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel, QualType ObjectType=QualType()); /// LookupImplementedMethodInGlobalPool - Returns the method which has an /// implementation. ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel); /// CollectIvarsToConstructOrDestruct - Collect those ivars which require /// initialization.
void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI, SmallVectorImpl<ObjCIvarDecl*> &Ivars); //===--------------------------------------------------------------------===// // Statement Parsing Callbacks: SemaStmt.cpp. public: class FullExprArg { public: FullExprArg() : E(nullptr) { } FullExprArg(Sema &actions) : E(nullptr) { } ExprResult release() { return E; } Expr *get() const { return E; } Expr *operator->() { return E; } private: // FIXME: No need to make the entire Sema class a friend when it's just // Sema::MakeFullExpr that needs access to the constructor below. friend class Sema; explicit FullExprArg(Expr *expr) : E(expr) {} Expr *E; }; FullExprArg MakeFullExpr(Expr *Arg) { return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation()); } FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) { return FullExprArg( ActOnFinishFullExpr(Arg, CC, /*DiscardedValue*/ false).get()); } FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) { ExprResult FE = ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(), /*DiscardedValue*/ true); return FullExprArg(FE.get()); } StmtResult ActOnExprStmt(ExprResult Arg, bool DiscardedValue = true); StmtResult ActOnExprStmtError(); StmtResult ActOnNullStmt(SourceLocation SemiLoc, bool HasLeadingEmptyMacro = false); void ActOnStartOfCompoundStmt(bool IsStmtExpr); void ActOnFinishOfCompoundStmt(); StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R, ArrayRef<Stmt *> Elts, bool isStmtExpr); /// An RAII object to enter the scope of a compound statement. class CompoundScopeRAII { public: CompoundScopeRAII(Sema &S, bool IsStmtExpr = false) : S(S) { S.ActOnStartOfCompoundStmt(IsStmtExpr); } ~CompoundScopeRAII() { S.ActOnFinishOfCompoundStmt(); } private: Sema &S; }; /// An RAII helper that pops a function scope on exit.
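/// A minimal usage sketch (illustrative only; 'SemaRef' and the condition
/// are hypothetical):
/// \code
///   FunctionScopeRAII PopScope(SemaRef); // pops the scope when destroyed
///   if (mustKeepScope)                   // hypothetical early-out
///     PopScope.disable();                // leave the function scope pushed
/// \endcode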
struct FunctionScopeRAII { Sema &S; bool Active; FunctionScopeRAII(Sema &S) : S(S), Active(true) {} ~FunctionScopeRAII() { if (Active) S.PopFunctionScopeInfo(); } void disable() { Active = false; } }; StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl, SourceLocation StartLoc, SourceLocation EndLoc); void ActOnForEachDeclStmt(DeclGroupPtrTy Decl); StmtResult ActOnForEachLValueExpr(Expr *E); ExprResult ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val); StmtResult ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHS, SourceLocation DotDotDotLoc, ExprResult RHS, SourceLocation ColonLoc); void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt); StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc, SourceLocation ColonLoc, Stmt *SubStmt, Scope *CurScope); StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl, SourceLocation ColonLoc, Stmt *SubStmt); StmtResult ActOnAttributedStmt(SourceLocation AttrLoc, ArrayRef<const Attr*> Attrs, Stmt *SubStmt); class ConditionResult; StmtResult ActOnIfStmt(SourceLocation IfLoc, bool IsConstexpr, Stmt *InitStmt, ConditionResult Cond, Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal); StmtResult BuildIfStmt(SourceLocation IfLoc, bool IsConstexpr, Stmt *InitStmt, ConditionResult Cond, Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal); StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc, Stmt *InitStmt, ConditionResult Cond); StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch, Stmt *Body); StmtResult ActOnWhileStmt(SourceLocation WhileLoc, ConditionResult Cond, Stmt *Body); StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body, SourceLocation WhileLoc, SourceLocation CondLParen, Expr *Cond, SourceLocation CondRParen); StmtResult ActOnForStmt(SourceLocation ForLoc, SourceLocation LParenLoc, Stmt *First, ConditionResult Second, FullExprArg Third, SourceLocation RParenLoc, Stmt *Body); ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc, Expr *collection); StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc, Stmt *First, Expr *collection, SourceLocation RParenLoc); StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body); enum BuildForRangeKind { /// Initial building of a for-range statement. BFRK_Build, /// Instantiation or recovery rebuild of a for-range statement. Don't /// attempt any typo-correction. BFRK_Rebuild, /// Determining whether a for-range statement could be built. Avoid any /// unnecessary or irreversible actions. 
BFRK_Check }; StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *InitStmt, Stmt *LoopVar, SourceLocation ColonLoc, Expr *Collection, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *InitStmt, SourceLocation ColonLoc, Stmt *RangeDecl, Stmt *Begin, Stmt *End, Expr *Cond, Expr *Inc, Stmt *LoopVarDecl, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body); StmtResult ActOnGotoStmt(SourceLocation GotoLoc, SourceLocation LabelLoc, LabelDecl *TheDecl); StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc, SourceLocation StarLoc, Expr *DestExp); StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope); StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope); void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, unsigned NumParams); typedef std::pair<StringRef, QualType> CapturedParamNameType; void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, ArrayRef<CapturedParamNameType> Params, unsigned OpenMPCaptureLevel = 0); StmtResult ActOnCapturedRegionEnd(Stmt *S); void ActOnCapturedRegionError(); RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD, SourceLocation Loc, unsigned NumParams); enum CopyElisionSemanticsKind { CES_Strict = 0, CES_AllowParameters = 1, CES_AllowDifferentTypes = 2, CES_AllowExceptionVariables = 4, CES_FormerDefault = (CES_AllowParameters), CES_Default = (CES_AllowParameters | CES_AllowDifferentTypes), CES_AsIfByStdMove = (CES_AllowParameters | CES_AllowDifferentTypes | CES_AllowExceptionVariables), }; VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E, CopyElisionSemanticsKind CESK); bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD, CopyElisionSemanticsKind CESK); StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp, Scope *CurScope); StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple, bool IsVolatile, unsigned NumOutputs, unsigned NumInputs, IdentifierInfo **Names, MultiExprArg Constraints, MultiExprArg Exprs, Expr *AsmString, MultiExprArg Clobbers, unsigned NumLabels, SourceLocation RParenLoc); void FillInlineAsmIdentifierInfo(Expr *Res, llvm::InlineAsmIdentifierInfo &Info); ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool IsUnevaluatedContext); bool LookupInlineAsmField(StringRef Base, StringRef Member, unsigned &Offset, SourceLocation AsmLoc); ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member, SourceLocation AsmLoc); StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc, ArrayRef<Token> AsmToks, StringRef AsmString, unsigned NumOutputs, unsigned NumInputs, ArrayRef<StringRef> Constraints, ArrayRef<StringRef> Clobbers, ArrayRef<Expr*> Exprs, SourceLocation EndLoc); LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName, SourceLocation Location, bool AlwaysCreate); VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id, bool Invalid = false); Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D); StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen, 
Decl *Parm, Stmt *Body); StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body); StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try, MultiStmtArg Catch, Stmt *Finally); StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw); StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw, Scope *CurScope); ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc, Expr *operand); StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc, Expr *SynchExpr, Stmt *SynchBody); StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body); VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id); Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D); StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc, Decl *ExDecl, Stmt *HandlerBlock); StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock, ArrayRef<Stmt *> Handlers); StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ? SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler); StmtResult ActOnSEHExceptBlock(SourceLocation Loc, Expr *FilterExpr, Stmt *Block); void ActOnStartSEHFinallyBlock(); void ActOnAbortSEHFinallyBlock(); StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block); StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope); void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock); bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const; /// If it's a file scoped decl that must warn if not used, keep track /// of it. void MarkUnusedFileScopedDecl(const DeclaratorDecl *D); /// DiagnoseUnusedExprResult - If the statement passed in is an expression /// whose result is unused, warn. void DiagnoseUnusedExprResult(const Stmt *S); void DiagnoseUnusedNestedTypedefs(const RecordDecl *D); void DiagnoseUnusedDecl(const NamedDecl *ND); /// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null /// statement as a \p Body, and it is located on the same line. /// /// This helps prevent bugs due to typos, such as: /// if (condition); /// do_stuff(); void DiagnoseEmptyStmtBody(SourceLocation StmtLoc, const Stmt *Body, unsigned DiagID); /// Warn if a for/while loop statement \p S, which is followed by /// \p PossibleBody, has a suspicious null statement as a body. void DiagnoseEmptyLoopBody(const Stmt *S, const Stmt *PossibleBody); /// Warn if a value is moved to itself. void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr, SourceLocation OpLoc); /// Warn if we're implicitly casting from a _Nullable pointer type to a /// _Nonnull one. void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType, SourceLocation Loc); /// Warn when implicitly casting 0 to nullptr. 
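/// For example, in the source being compiled (illustrative):
/// \code
///   void take(int *);
///   take(0); // the literal 0 implicitly becomes a null pointer; may warn
/// \endcode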
void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E); ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) { return DelayedDiagnostics.push(pool); } void PopParsingDeclaration(ParsingDeclState state, Decl *decl); typedef ProcessingContextState ParsingClassState; ParsingClassState PushParsingClass() { ParsingClassDepth++; return DelayedDiagnostics.pushUndelayed(); } void PopParsingClass(ParsingClassState state) { ParsingClassDepth--; DelayedDiagnostics.popUndelayed(state); } void redelayDiagnostics(sema::DelayedDiagnosticPool &pool); void DiagnoseAvailabilityOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs, const ObjCInterfaceDecl *UnknownObjCClass, bool ObjCPropertyAccess, bool AvoidPartialAvailabilityChecks = false, ObjCInterfaceDecl *ClassReceiver = nullptr); bool makeUnavailableInSystemHeader(SourceLocation loc, UnavailableAttr::ImplicitReason reason); /// Issue any -Wunguarded-availability warnings in \c FD void DiagnoseUnguardedAvailabilityViolations(Decl *FD); //===--------------------------------------------------------------------===// // Expression Parsing Callbacks: SemaExpr.cpp. bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid); bool DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs, const ObjCInterfaceDecl *UnknownObjCClass = nullptr, bool ObjCPropertyAccess = false, bool AvoidPartialAvailabilityChecks = false, ObjCInterfaceDecl *ClassReceiver = nullptr); void NoteDeletedFunction(FunctionDecl *FD); void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD); bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD, ObjCMethodDecl *Getter, SourceLocation Loc); void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc, ArrayRef<Expr *> Args); void PushExpressionEvaluationContext( ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr, ExpressionEvaluationContextRecord::ExpressionKind Type = ExpressionEvaluationContextRecord::EK_Other); enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl }; void PushExpressionEvaluationContext( ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t, ExpressionEvaluationContextRecord::ExpressionKind Type = ExpressionEvaluationContextRecord::EK_Other); void PopExpressionEvaluationContext(); void DiscardCleanupsInEvaluationContext(); ExprResult TransformToPotentiallyEvaluated(Expr *E); ExprResult HandleExprEvaluationContextForTypeof(Expr *E); ExprResult CheckUnevaluatedOperand(Expr *E); void CheckUnusedVolatileAssignment(Expr *E); ExprResult ActOnConstantExpression(ExprResult Res); // Functions for marking a declaration referenced. These functions also // contain the relevant logic for marking if a reference to a function or // variable is an odr-use (in the C++11 sense). There are separate variants // for expressions referring to a decl; these exist because odr-use marking // needs to be delayed for some constant variables when we build one of the // named expressions. // // MightBeOdrUse indicates whether the use could possibly be an odr-use, and // should usually be true. This only needs to be set to false if the lack of // odr-use cannot be determined from the current context (for instance, // because the name denotes a virtual function and was written without an // explicit nested-name-specifier).
void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse); void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func, bool MightBeOdrUse = true); void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var); void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr); void MarkMemberReferenced(MemberExpr *E); void MarkFunctionParmPackReferenced(FunctionParmPackExpr *E); void MarkCaptureUsedInEnclosingContext(VarDecl *Capture, SourceLocation Loc, unsigned CapturingScopeIndex); ExprResult CheckLValueToRValueConversionOperand(Expr *E); void CleanupVarDeclMarking(); enum TryCaptureKind { TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef }; /// Try to capture the given variable. /// /// \param Var The variable to capture. /// /// \param Loc The location at which the capture occurs. /// /// \param Kind The kind of capture, which may be implicit (for either a /// block or a lambda), or explicit by-value or by-reference (for a lambda). /// /// \param EllipsisLoc The location of the ellipsis, if one is provided in /// an explicit lambda capture. /// /// \param BuildAndDiagnose Whether we are actually supposed to add the /// captures or diagnose errors. If false, this routine merely checks whether /// the capture can occur without performing the capture itself or complaining /// if the variable cannot be captured. /// /// \param CaptureType Will be set to the type of the field used to capture /// this variable in the innermost block or lambda. Only valid when the /// variable can be captured. /// /// \param DeclRefType Will be set to the type of a reference to the capture /// from within the current scope. Only valid when the variable can be /// captured. /// /// \param FunctionScopeIndexToStopAt If non-null, it points to the index /// of the FunctionScopeInfo stack beyond which we do not attempt to capture. /// This is useful when enclosing lambdas must speculatively capture /// variables that may or may not be used in certain specializations of /// a nested generic lambda. /// /// \returns true if an error occurred (i.e., the variable cannot be /// captured) and false if the capture succeeded. bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind, SourceLocation EllipsisLoc, bool BuildAndDiagnose, QualType &CaptureType, QualType &DeclRefType, const unsigned *const FunctionScopeIndexToStopAt); /// Try to capture the given variable. bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind = TryCapture_Implicit, SourceLocation EllipsisLoc = SourceLocation()); /// Checks if the variable must be captured. bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc); /// Given a variable, determine the type that a reference to that /// variable will have in the given scope. QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc); /// Mark all of the declarations referenced within a particular AST node as /// referenced. Used when template instantiation instantiates a non-dependent /// type -- entities referenced by the type are now referenced. void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T); void MarkDeclarationsReferencedInExpr(Expr *E, bool SkipLocalVariables = false); /// Try to recover by turning the given expression into a /// call. Returns true if recovery was attempted or an error was /// emitted; this may also leave the ExprResult invalid.
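/// A typical recovery (illustrative): for 'if (isEnabled)' where 'isEnabled'
/// names a zero-argument function, the expression may be rebuilt as the call
/// 'isEnabled()' and a fix-it suggested.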
bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD, bool ForceComplain = false, bool (*IsPlausibleResult)(QualType) = nullptr); /// Figure out if an expression could be turned into a call. bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy, UnresolvedSetImpl &NonTemplateOverloads); /// Conditionally issue a diagnostic based on the current /// evaluation context. /// /// \param Statement If Statement is non-null, delay reporting the /// diagnostic until the function body is parsed, and then do a basic /// reachability analysis to determine if the statement is reachable. /// If it is unreachable, the diagnostic will not be emitted. bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement, const PartialDiagnostic &PD); /// Similar, but diagnostic is only produced if all the specified statements /// are reachable. bool DiagRuntimeBehavior(SourceLocation Loc, ArrayRef<const Stmt*> Stmts, const PartialDiagnostic &PD); // Primary Expressions. SourceRange getExprRange(Expr *E) const; ExprResult ActOnIdExpression( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand, CorrectionCandidateCallback *CCC = nullptr, bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr); void DecomposeUnqualifiedId(const UnqualifiedId &Id, TemplateArgumentListInfo &Buffer, DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *&TemplateArgs); bool DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R, CorrectionCandidateCallback &CCC, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr); DeclResult LookupIvarInObjCMethod(LookupResult &Lookup, Scope *S, IdentifierInfo *II); ExprResult BuildIvarRefExpr(Scope *S, SourceLocation Loc, ObjCIvarDecl *IV); ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S, IdentifierInfo *II, bool AllowBuiltinCreation=false); ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, bool isAddressOfOperand, const TemplateArgumentListInfo *TemplateArgs); /// If \p D cannot be odr-used in the current expression evaluation context, /// return a reason explaining why. Otherwise, return NOUR_None. 
NonOdrUseReason getNonOdrUseReasonInCurrentContext(ValueDecl *D); DeclRefExpr *BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, SourceLocation Loc, const CXXScopeSpec *SS = nullptr); DeclRefExpr * BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, const CXXScopeSpec *SS = nullptr, NamedDecl *FoundD = nullptr, SourceLocation TemplateKWLoc = SourceLocation(), const TemplateArgumentListInfo *TemplateArgs = nullptr); DeclRefExpr * BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, NestedNameSpecifierLoc NNS, NamedDecl *FoundD = nullptr, SourceLocation TemplateKWLoc = SourceLocation(), const TemplateArgumentListInfo *TemplateArgs = nullptr); ExprResult BuildAnonymousStructUnionMemberReference( const CXXScopeSpec &SS, SourceLocation nameLoc, IndirectFieldDecl *indirectField, DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none), Expr *baseObjectExpr = nullptr, SourceLocation opLoc = SourceLocation()); ExprResult BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S); ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, bool IsDefiniteInstance, const Scope *S); bool UseArgumentDependentLookup(const CXXScopeSpec &SS, const LookupResult &R, bool HasTrailingLParen); ExprResult BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, bool IsAddressOfOperand, const Scope *S, TypeSourceInfo **RecoveryTSI = nullptr); ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS, LookupResult &R, bool NeedsADL, bool AcceptInvalidDecl = false); ExprResult BuildDeclarationNameExpr( const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D, NamedDecl *FoundD = nullptr, const TemplateArgumentListInfo *TemplateArgs = nullptr, bool AcceptInvalidDecl = false); ExprResult BuildLiteralOperatorCall(LookupResult &R, DeclarationNameInfo &SuffixInfo, ArrayRef<Expr *> Args, SourceLocation LitEndLoc, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr); ExprResult BuildPredefinedExpr(SourceLocation Loc, PredefinedExpr::IdentKind IK); ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind); ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val); bool CheckLoopHintExpr(Expr *E, SourceLocation Loc); ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnCharacterConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E); ExprResult ActOnParenListExpr(SourceLocation L, SourceLocation R, MultiExprArg Val); /// ActOnStringLiteral - The specified tokens were lexed as pasted string /// fragments (e.g. "foo" "bar" L"baz"). 
ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks, Scope *UDLScope = nullptr); ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<ParsedType> ArgTypes, ArrayRef<Expr *> ArgExprs); ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<TypeSourceInfo *> Types, ArrayRef<Expr *> Exprs); // Binary/Unary Operators. 'Tok' is the token for the operator. ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *InputExpr); ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *Input); ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Op, Expr *Input); bool isQualifiedMemberAccess(Expr *E); QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc); ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, SourceRange R); ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, bool IsType, void *TyOrEx, SourceRange ArgRange); ExprResult CheckPlaceholderExpr(Expr *E); bool CheckVecStepExpr(Expr *E); bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind); bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc, SourceRange ExprRange, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnSizeofParameterPackExpr(Scope *S, SourceLocation OpLoc, IdentifierInfo &Name, SourceLocation NameLoc, SourceLocation RParenLoc); ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Kind, Expr *Input); ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc, Expr *LowerBound, SourceLocation ColonLoc, Expr *Length, SourceLocation RBLoc); // This struct is for use by ActOnMemberAccess to allow // BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after // changing the access operator from a '.' to a '->' (to see if that is the // change needed to fix an error about an unknown member, e.g. when the class // defines a custom operator->). 
struct ActOnMemberAccessExtraArgs { Scope *S; UnqualifiedId &Id; Decl *ObjCImpDecl; }; ExprResult BuildMemberReferenceExpr( Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, bool SuppressQualifierCheck = false, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, FieldDecl *Field, DeclAccessPair FoundDecl, const DeclarationNameInfo &MemberNameInfo); ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow); bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType, const CXXScopeSpec &SS, const LookupResult &R); ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Member, Decl *ObjCImpDecl); MemberExpr * BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec *SS, SourceLocation TemplateKWLoc, ValueDecl *Member, DeclAccessPair FoundDecl, bool HadMultipleCandidates, const DeclarationNameInfo &MemberNameInfo, QualType Ty, ExprValueKind VK, ExprObjectKind OK, const TemplateArgumentListInfo *TemplateArgs = nullptr); MemberExpr * BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc, NestedNameSpecifierLoc NNS, SourceLocation TemplateKWLoc, ValueDecl *Member, DeclAccessPair FoundDecl, bool HadMultipleCandidates, const DeclarationNameInfo &MemberNameInfo, QualType Ty, ExprValueKind VK, ExprObjectKind OK, const TemplateArgumentListInfo *TemplateArgs = nullptr); void ActOnDefaultCtorInitializers(Decl *CDtorDecl); bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn, FunctionDecl *FDecl, const FunctionProtoType *Proto, ArrayRef<Expr *> Args, SourceLocation RParenLoc, bool ExecConfig = false); void CheckStaticArrayArgument(SourceLocation CallLoc, ParmVarDecl *Param, const Expr *ArgExpr); /// ActOnCallExpr - Handle a call to Fn with the specified array of arguments. /// This provides the location of the left/right parens and a list of comma /// locations. 
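/// For a call written as 'f(a, b)' (illustrative), Fn is the callee
/// expression 'f', ArgExprs holds 'a' and 'b', and LParenLoc/RParenLoc
/// delimit the argument list.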
ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc, MultiExprArg ArgExprs, SourceLocation RParenLoc, Expr *ExecConfig = nullptr); ExprResult BuildCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc, MultiExprArg ArgExprs, SourceLocation RParenLoc, Expr *ExecConfig = nullptr, bool IsExecConfig = false); enum class AtomicArgumentOrder { API, AST }; ExprResult BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange, SourceLocation RParenLoc, MultiExprArg Args, AtomicExpr::AtomicOp Op, AtomicArgumentOrder ArgOrder = AtomicArgumentOrder::API); ExprResult BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation LParenLoc, ArrayRef<Expr *> Arg, SourceLocation RParenLoc, Expr *Config = nullptr, bool IsExecConfig = false, ADLCallKind UsesADL = ADLCallKind::NotADL); ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc, MultiExprArg ExecConfig, SourceLocation GGGLoc); ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc, Declarator &D, ParsedType &Ty, SourceLocation RParenLoc, Expr *CastExpr); ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc, TypeSourceInfo *Ty, SourceLocation RParenLoc, Expr *Op); CastKind PrepareScalarCast(ExprResult &src, QualType destType); /// Build an altivec or OpenCL literal. ExprResult BuildVectorLiteral(SourceLocation LParenLoc, SourceLocation RParenLoc, Expr *E, TypeSourceInfo *TInfo); ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME); ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc, Expr *InitExpr); ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc, TypeSourceInfo *TInfo, SourceLocation RParenLoc, Expr *LiteralExpr); ExprResult ActOnInitList(SourceLocation LBraceLoc, MultiExprArg InitArgList, SourceLocation RBraceLoc); ExprResult BuildInitList(SourceLocation LBraceLoc, MultiExprArg InitArgList, SourceLocation RBraceLoc); ExprResult ActOnDesignatedInitializer(Designation &Desig, SourceLocation EqualOrColonLoc, bool GNUSyntax, ExprResult Init); private: static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind); public: ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc, tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr); ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr); ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr); void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc); /// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null /// in the case of the GNU conditional expr extension. ExprResult ActOnConditionalOp(SourceLocation QuestionLoc, SourceLocation ColonLoc, Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr); /// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo". ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc, LabelDecl *TheDecl); void ActOnStartStmtExpr(); ExprResult ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt, SourceLocation RPLoc); // "({..})" // Handle the final expression in a statement expression.
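// For example (GNU extension; the surrounding code is illustrative):
//   int y = ({ int x = f(); x + 1; }); // the '({...})' evaluates to 'x + 1'
// Here the trailing expression 'x + 1' is the one handled below.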
ExprResult ActOnStmtExprResult(ExprResult E); void ActOnStmtExprError(); // __builtin_offsetof(type, identifier(.identifier|[expr])*) struct OffsetOfComponent { SourceLocation LocStart, LocEnd; bool isBrackets; // true if [expr], false if .ident union { IdentifierInfo *IdentInfo; Expr *E; } U; }; /// __builtin_offsetof(type, a.b[123][456].c) ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc, TypeSourceInfo *TInfo, ArrayRef<OffsetOfComponent> Components, SourceLocation RParenLoc); ExprResult ActOnBuiltinOffsetOf(Scope *S, SourceLocation BuiltinLoc, SourceLocation TypeLoc, ParsedType ParsedArgTy, ArrayRef<OffsetOfComponent> Components, SourceLocation RParenLoc); // __builtin_choose_expr(constExpr, expr1, expr2) ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc, Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr, SourceLocation RPLoc); // __builtin_va_arg(expr, type) ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty, SourceLocation RPLoc); ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E, TypeSourceInfo *TInfo, SourceLocation RPLoc); // __builtin_LINE(), __builtin_FUNCTION(), __builtin_FILE(), // __builtin_COLUMN() ExprResult ActOnSourceLocExpr(SourceLocExpr::IdentKind Kind, SourceLocation BuiltinLoc, SourceLocation RPLoc); // Build a potentially resolved SourceLocExpr. ExprResult BuildSourceLocExpr(SourceLocExpr::IdentKind Kind, SourceLocation BuiltinLoc, SourceLocation RPLoc, DeclContext *ParentContext); // __null ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc); bool CheckCaseExpression(Expr *E); /// Describes the result of an "if-exists" condition check. enum IfExistsResult { /// The symbol exists. IER_Exists, /// The symbol does not exist. IER_DoesNotExist, /// The name is a dependent name, so the results will differ /// from one instantiation to the next. IER_Dependent, /// An error occurred. IER_Error }; IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS, const DeclarationNameInfo &TargetNameInfo); IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name); StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, NestedNameSpecifierLoc QualifierLoc, DeclarationNameInfo NameInfo, Stmt *Nested); StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name, Stmt *Nested); //===------------------------- "Block" Extension ------------------------===// /// ActOnBlockStart - This callback is invoked when a block literal is /// started. void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockArguments - This callback allows processing of block arguments. /// If there are no arguments, this is still invoked. void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo, Scope *CurScope); /// ActOnBlockError - If there is an error parsing a block, this callback /// is invoked to pop the information about the block from the action impl. void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockStmtExpr - This is called when the body of a block statement /// literal was successfully completed. ^(int x){...} ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body, Scope *CurScope); //===---------------------------- Clang Extensions ----------------------===// /// __builtin_convertvector(...) 
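  // Editorial aside (not part of the original header): the builtin named in
  // the comment above performs an element-wise conversion between vector
  // types of equal length; per the Clang language-extension docs, e.g.:
  //
  //   typedef float float4 __attribute__((ext_vector_type(4)));
  //   typedef int   int4   __attribute__((ext_vector_type(4)));
  //   int4 truncate(float4 v) { return __builtin_convertvector(v, int4); }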
  ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy,
                                    SourceLocation BuiltinLoc,
                                    SourceLocation RParenLoc);

  //===---------------------------- OpenCL Features -----------------------===//

  /// __builtin_astype(...)
  ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy,
                             SourceLocation BuiltinLoc,
                             SourceLocation RParenLoc);

  //===---------------------------- C++ Features --------------------------===//

  // Act on C++ namespaces
  Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc,
                               SourceLocation NamespaceLoc,
                               SourceLocation IdentLoc, IdentifierInfo *Ident,
                               SourceLocation LBrace,
                               const ParsedAttributesView &AttrList,
                               UsingDirectiveDecl *&UsingDecl);
  void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace);

  NamespaceDecl *getStdNamespace() const;
  NamespaceDecl *getOrCreateStdNamespace();
  NamespaceDecl *lookupStdExperimentalNamespace();

  CXXRecordDecl *getStdBadAlloc() const;
  EnumDecl *getStdAlignValT() const;

private:
  // A cache representing if we've fully checked the various comparison category
  // types stored in ASTContext. The bit-index corresponds to the integer value
  // of a ComparisonCategoryType enumerator.
  llvm::SmallBitVector FullyCheckedComparisonCategories;

  ValueDecl *tryLookupCtorInitMemberDecl(CXXRecordDecl *ClassDecl,
                                         CXXScopeSpec &SS,
                                         ParsedType TemplateTypeTy,
                                         IdentifierInfo *MemberOrBase);

public:
  enum class ComparisonCategoryUsage {
    /// The '<=>' operator was used in an expression and a builtin operator
    /// was selected.
    OperatorInExpression,

    /// A defaulted 'operator<=>' needed the comparison category. This
    /// typically only applies to 'std::strong_ordering', due to the implicit
    /// fallback return value.
    DefaultedOperator,
  };

  /// Lookup the specified comparison category types in the standard
  /// library, and check the VarDecls possibly returned by the operator<=>
  /// builtins for that type.
  ///
  /// \return The type of the comparison category type corresponding to the
  ///   specified Kind, or a null type if an error occurs.
  QualType CheckComparisonCategoryType(ComparisonCategoryType Kind,
                                       SourceLocation Loc,
                                       ComparisonCategoryUsage Usage);

  /// Tests whether Ty is an instance of std::initializer_list and, if
  /// it is and Element is not NULL, assigns the element type to Element.
  bool isStdInitializerList(QualType Ty, QualType *Element);

  /// Looks for the std::initializer_list template and instantiates it
  /// with Element, or emits an error if it's not found.
  ///
  /// \returns The instantiated template, or null on error.
  QualType BuildStdInitializerList(QualType Element, SourceLocation Loc);

  /// Determine whether Ctor is an initializer-list constructor, as
  /// defined in [dcl.init.list]p2.
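  // Editorial aside (not part of the original header): per [dcl.init.list]p2,
  // an initializer-list constructor is one whose first parameter is a
  // std::initializer_list (or reference to one) and whose remaining
  // parameters, if any, have default arguments. A sketch:
  //
  //   struct S {
  //     S(std::initializer_list<int>);  // initializer-list constructor
  //     S(int, int);                    // not one
  //   };
  //   S s{1, 2};  // list-initialization prefers the first overload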
  bool isInitListConstructor(const FunctionDecl *Ctor);

  Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc,
                            SourceLocation NamespcLoc, CXXScopeSpec &SS,
                            SourceLocation IdentLoc,
                            IdentifierInfo *NamespcName,
                            const ParsedAttributesView &AttrList);
  void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir);

  Decl *ActOnNamespaceAliasDef(Scope *CurScope, SourceLocation NamespaceLoc,
                               SourceLocation AliasLoc, IdentifierInfo *Alias,
                               CXXScopeSpec &SS, SourceLocation IdentLoc,
                               IdentifierInfo *Ident);

  void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow);
  bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target,
                            const LookupResult &PreviousDecls,
                            UsingShadowDecl *&PrevShadow);
  UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD,
                                        NamedDecl *Target,
                                        UsingShadowDecl *PrevDecl);

  bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
                                   bool HasTypenameKeyword,
                                   const CXXScopeSpec &SS,
                                   SourceLocation NameLoc,
                                   const LookupResult &Previous);
  bool CheckUsingDeclQualifier(SourceLocation UsingLoc, bool HasTypename,
                               const CXXScopeSpec &SS,
                               const DeclarationNameInfo &NameInfo,
                               SourceLocation NameLoc);

  NamedDecl *BuildUsingDeclaration(
      Scope *S, AccessSpecifier AS, SourceLocation UsingLoc,
      bool HasTypenameKeyword, SourceLocation TypenameLoc, CXXScopeSpec &SS,
      DeclarationNameInfo NameInfo, SourceLocation EllipsisLoc,
      const ParsedAttributesView &AttrList, bool IsInstantiation);
  NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom,
                                ArrayRef<NamedDecl *> Expansions);

  bool CheckInheritingConstructorUsingDecl(UsingDecl *UD);

  /// Given a derived-class using shadow declaration for a constructor and the
  /// corresponding base class constructor, find or create the implicit
  /// synthesized derived class constructor to use for this initialization.
  CXXConstructorDecl *
  findInheritingConstructor(SourceLocation Loc, CXXConstructorDecl *BaseCtor,
                            ConstructorUsingShadowDecl *DerivedShadow);

  Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS,
                              SourceLocation UsingLoc,
                              SourceLocation TypenameLoc, CXXScopeSpec &SS,
                              UnqualifiedId &Name, SourceLocation EllipsisLoc,
                              const ParsedAttributesView &AttrList);
  Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS,
                              MultiTemplateParamsArg TemplateParams,
                              SourceLocation UsingLoc, UnqualifiedId &Name,
                              const ParsedAttributesView &AttrList,
                              TypeResult Type, Decl *DeclFromDeclSpec);

  /// BuildCXXConstructExpr - Creates a complete call to a constructor,
  /// including handling of its default argument expressions.
  ///
  /// \param ConstructKind - a CXXConstructExpr::ConstructionKind
  ExprResult BuildCXXConstructExpr(
      SourceLocation ConstructLoc, QualType DeclInitType, NamedDecl *FoundDecl,
      CXXConstructorDecl *Constructor, MultiExprArg Exprs,
      bool HadMultipleCandidates, bool IsListInitialization,
      bool IsStdInitListInitialization, bool RequiresZeroInit,
      unsigned ConstructKind, SourceRange ParenRange);

  /// Build a CXXConstructExpr whose constructor has already been resolved if
  /// it denotes an inherited constructor.
  ExprResult BuildCXXConstructExpr(
      SourceLocation ConstructLoc, QualType DeclInitType,
      CXXConstructorDecl *Constructor, bool Elidable, MultiExprArg Exprs,
      bool HadMultipleCandidates, bool IsListInitialization,
      bool IsStdInitListInitialization, bool RequiresZeroInit,
      unsigned ConstructKind, SourceRange ParenRange);

  // FIXME: Can we remove this and have the above BuildCXXConstructExpr check if
  // the constructor can be elidable?
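  // Editorial aside (not part of the original header): "elidable" above refers
  // to copy elision. A sketch of a construction where the copy/move is
  // elidable and the Elidable flag would matter:
  //
  //   struct W { W(); W(const W&); };
  //   W w = W();  // the copy from the temporary may be elided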
  ExprResult BuildCXXConstructExpr(
      SourceLocation ConstructLoc, QualType DeclInitType, NamedDecl *FoundDecl,
      CXXConstructorDecl *Constructor, bool Elidable, MultiExprArg Exprs,
      bool HadMultipleCandidates, bool IsListInitialization,
      bool IsStdInitListInitialization, bool RequiresZeroInit,
      unsigned ConstructKind, SourceRange ParenRange);

  ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field);

  /// Instantiate or parse a C++ default argument expression as necessary.
  /// Return true on error.
  bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD,
                              ParmVarDecl *Param);

  /// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating
  /// the default expr if needed.
  ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD,
                                    ParmVarDecl *Param);

  /// FinalizeVarWithDestructor - Prepare for calling destructor on the
  /// constructed variable.
  void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType);

  /// Helper class that collects exception specifications for
  /// implicitly-declared special member functions.
  class ImplicitExceptionSpecification {
    // Pointer to allow copying
    Sema *Self;
    // We order exception specifications thus:
    // noexcept is the most restrictive, but is only used in C++11.
    // throw() comes next.
    // Then a throw(collected exceptions)
    // Finally no specification, which is expressed as noexcept(false).
    // throw(...) is used instead if any called function uses it.
    ExceptionSpecificationType ComputedEST;
    llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen;
    SmallVector<QualType, 4> Exceptions;

    void ClearExceptions() {
      ExceptionsSeen.clear();
      Exceptions.clear();
    }

  public:
    explicit ImplicitExceptionSpecification(Sema &Self)
        : Self(&Self), ComputedEST(EST_BasicNoexcept) {
      if (!Self.getLangOpts().CPlusPlus11)
        ComputedEST = EST_DynamicNone;
    }

    /// Get the computed exception specification type.
    ExceptionSpecificationType getExceptionSpecType() const {
      assert(!isComputedNoexcept(ComputedEST) &&
             "noexcept(expr) should not be a possible result");
      return ComputedEST;
    }

    /// The number of exceptions in the exception specification.
    unsigned size() const { return Exceptions.size(); }

    /// The set of exceptions in the exception specification.
    const QualType *data() const { return Exceptions.data(); }

    /// Integrate another called method into the collected data.
    void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method);

    /// Integrate an invoked expression into the collected data.
    void CalledExpr(Expr *E) { CalledStmt(E); }

    /// Integrate an invoked statement into the collected data.
    void CalledStmt(Stmt *S);

    /// Overwrite an EPI's exception specification with this
    /// computed exception specification.
    FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const {
      FunctionProtoType::ExceptionSpecInfo ESI;
      ESI.Type = getExceptionSpecType();
      if (ESI.Type == EST_Dynamic) {
        ESI.Exceptions = Exceptions;
      } else if (ESI.Type == EST_None) {
        /// C++11 [except.spec]p14:
        ///   The exception-specification is noexcept(false) if the set of
        ///   potential exceptions of the special member function contains "any"
        ESI.Type = EST_NoexceptFalse;
        ESI.NoexceptExpr =
            Self->ActOnCXXBoolLiteral(SourceLocation(), tok::kw_false).get();
      }
      return ESI;
    }
  };

  /// Determine what sort of exception specification a defaulted
  /// default constructor of a class will have.
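  // Editorial aside (not part of the original header): the computed spec
  // mirrors the C++11 rule that a defaulted member is noexcept unless some
  // function it directly invokes may throw. A sketch:
  //
  //   struct A { A() noexcept; };
  //   struct B { B(); };       // potentially-throwing
  //   struct C { A a; };       // defaulted C::C() is noexcept(true)
  //   struct D { B b; };       // defaulted D::D() is noexcept(false)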
  ImplicitExceptionSpecification
  ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc,
                                           CXXMethodDecl *MD);

  /// Determine what sort of exception specification a defaulted
  /// copy constructor of a class will have, and whether the parameter
  /// will be const.
  ImplicitExceptionSpecification
  ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD);

  /// Determine what sort of exception specification a defaulted
  /// copy assignment operator of a class will have, and whether the
  /// parameter will be const.
  ImplicitExceptionSpecification
  ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD);

  /// Determine what sort of exception specification a defaulted move
  /// constructor of a class will have.
  ImplicitExceptionSpecification
  ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD);

  /// Determine what sort of exception specification a defaulted move
  /// assignment operator of a class will have.
  ImplicitExceptionSpecification
  ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD);

  /// Determine what sort of exception specification a defaulted
  /// destructor of a class will have.
  ImplicitExceptionSpecification
  ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD);

  /// Determine what sort of exception specification an inheriting
  /// constructor of a class will have.
  ImplicitExceptionSpecification
  ComputeInheritingCtorExceptionSpec(SourceLocation Loc,
                                     CXXConstructorDecl *CD);

  /// Evaluate the implicit exception specification for a defaulted
  /// special member function.
  void EvaluateImplicitExceptionSpec(SourceLocation Loc, FunctionDecl *FD);

  /// Check the given noexcept-specifier, convert its expression, and compute
  /// the appropriate ExceptionSpecificationType.
  ExprResult ActOnNoexceptSpec(SourceLocation NoexceptLoc, Expr *NoexceptExpr,
                               ExceptionSpecificationType &EST);

  /// Check the given exception-specification and update the
  /// exception specification information with the results.
  void checkExceptionSpecification(bool IsTopLevel,
                                   ExceptionSpecificationType EST,
                                   ArrayRef<ParsedType> DynamicExceptions,
                                   ArrayRef<SourceRange> DynamicExceptionRanges,
                                   Expr *NoexceptExpr,
                                   SmallVectorImpl<QualType> &Exceptions,
                                   FunctionProtoType::ExceptionSpecInfo &ESI);

  /// Determine if we're in a case where we need to (incorrectly) eagerly
  /// parse an exception specification to work around a libstdc++ bug.
  bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D);

  /// Add an exception-specification to the given member function
  /// (or member function template). The exception-specification was parsed
  /// after the method itself was declared.
  void actOnDelayedExceptionSpecification(
      Decl *Method, ExceptionSpecificationType EST,
      SourceRange SpecificationRange, ArrayRef<ParsedType> DynamicExceptions,
      ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr);

  class InheritedConstructorInfo;

  /// Determine if a special member function should have a deleted
  /// definition when it is defaulted.
  bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM,
                                 InheritedConstructorInfo *ICI = nullptr,
                                 bool Diagnose = false);

  /// Produce notes explaining why a defaulted function was defined as deleted.
  void DiagnoseDeletedDefaultedFunction(FunctionDecl *FD);

  /// Declare the implicit default constructor for the given class.
  ///
  /// \param ClassDecl The class declaration into which the implicit
  /// default constructor will be added.
  ///
  /// \returns The implicitly-declared default constructor.
CXXConstructorDecl *DeclareImplicitDefaultConstructor( CXXRecordDecl *ClassDecl); /// DefineImplicitDefaultConstructor - Checks for feasibility of /// defining this constructor as the default constructor. void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit destructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// destructor will be added. /// /// \returns The implicitly-declared destructor. CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl); /// DefineImplicitDestructor - Checks for feasibility of /// defining this destructor as the default destructor. void DefineImplicitDestructor(SourceLocation CurrentLocation, CXXDestructorDecl *Destructor); /// Build an exception spec for destructors that don't have one. /// /// C++11 says that user-defined destructors with no exception spec get one /// that looks as if the destructor was implicitly declared. void AdjustDestructorExceptionSpec(CXXDestructorDecl *Destructor); /// Define the specified inheriting constructor. void DefineInheritingConstructor(SourceLocation UseLoc, CXXConstructorDecl *Constructor); /// Declare the implicit copy constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy constructor will be added. /// /// \returns The implicitly-declared copy constructor. CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitCopyConstructor - Checks for feasibility of /// defining this constructor as the copy constructor. void DefineImplicitCopyConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit move constructor for the given class. /// /// \param ClassDecl The Class declaration into which the implicit /// move constructor will be added. /// /// \returns The implicitly-declared move constructor, or NULL if it wasn't /// declared. CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitMoveConstructor - Checks for feasibility of /// defining this constructor as the move constructor. void DefineImplicitMoveConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit copy assignment operator for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy assignment operator will be added. /// /// \returns The implicitly-declared copy assignment operator. CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl); /// Defines an implicitly-declared copy assignment operator. void DefineImplicitCopyAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl); /// Declare the implicit move assignment operator for the given class. /// /// \param ClassDecl The Class declaration into which the implicit /// move assignment operator will be added. /// /// \returns The implicitly-declared move assignment operator, or NULL if it /// wasn't declared. CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl); /// Defines an implicitly-declared move assignment operator. void DefineImplicitMoveAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl); /// Force the declaration of any implicitly-declared members of this /// class. void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class); /// Check a completed declaration of an implicit special member. 
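  // Editorial aside (not part of the original header): the "or NULL if it
  // wasn't declared" results above reflect the C++11 suppression rules; for
  // example, a user-declared copy operation prevents the implicit move
  // constructor from being declared:
  //
  //   struct T {
  //     T(const T&);   // user-declared copy ctor...
  //   };               // ...so no implicit move ctor is declared for T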
  void CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD);

  /// Determine whether the given function is an implicitly-deleted
  /// special member function.
  bool isImplicitlyDeleted(FunctionDecl *FD);

  /// Check whether 'this' shows up in the type of a static member
  /// function after the (naturally empty) cv-qualifier-seq would be.
  ///
  /// \returns true if an error occurred.
  bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method);

  /// Check whether 'this' shows up in the exception specification of a
  /// static member function.
  bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method);

  /// Check whether 'this' shows up in the attributes of the given
  /// static member function.
  ///
  /// \returns true if an error occurred.
  bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method);

  /// MaybeBindToTemporary - If the passed in expression has a record type with
  /// a non-trivial destructor, this will return a CXXBindTemporaryExpr.
  /// Otherwise it simply returns the passed in expression.
  ExprResult MaybeBindToTemporary(Expr *E);

  bool CompleteConstructorCall(CXXConstructorDecl *Constructor,
                               MultiExprArg ArgsPtr, SourceLocation Loc,
                               SmallVectorImpl<Expr*> &ConvertedArgs,
                               bool AllowExplicit = false,
                               bool IsListInitialization = false);

  ParsedType getInheritingConstructorName(CXXScopeSpec &SS,
                                          SourceLocation NameLoc,
                                          IdentifierInfo &Name);

  ParsedType getConstructorName(IdentifierInfo &II, SourceLocation NameLoc,
                                Scope *S, CXXScopeSpec &SS,
                                bool EnteringContext);
  ParsedType getDestructorName(SourceLocation TildeLoc, IdentifierInfo &II,
                               SourceLocation NameLoc, Scope *S,
                               CXXScopeSpec &SS, ParsedType ObjectType,
                               bool EnteringContext);
  ParsedType getDestructorTypeForDecltype(const DeclSpec &DS,
                                          ParsedType ObjectType);

  // Checks that reinterpret casts don't have undefined behavior.
  void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType,
                                      bool IsDereference, SourceRange Range);

  /// ActOnCXXNamedCast - Parse {dynamic,static,reinterpret,const}_cast's.
  ExprResult ActOnCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind,
                               SourceLocation LAngleBracketLoc, Declarator &D,
                               SourceLocation RAngleBracketLoc,
                               SourceLocation LParenLoc, Expr *E,
                               SourceLocation RParenLoc);

  ExprResult BuildCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind,
                               TypeSourceInfo *Ty, Expr *E,
                               SourceRange AngleBrackets, SourceRange Parens);

  ExprResult ActOnBuiltinBitCastExpr(SourceLocation KWLoc, Declarator &Dcl,
                                     ExprResult Operand,
                                     SourceLocation RParenLoc);
  ExprResult BuildBuiltinBitCastExpr(SourceLocation KWLoc, TypeSourceInfo *TSI,
                                     Expr *Operand, SourceLocation RParenLoc);

  ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc,
                            TypeSourceInfo *Operand,
                            SourceLocation RParenLoc);
  ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc,
                            Expr *Operand, SourceLocation RParenLoc);

  /// ActOnCXXTypeid - Parse typeid( something ).
  ExprResult ActOnCXXTypeid(SourceLocation OpLoc, SourceLocation LParenLoc,
                            bool isType, void *TyOrExpr,
                            SourceLocation RParenLoc);

  ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc,
                            TypeSourceInfo *Operand,
                            SourceLocation RParenLoc);
  ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc,
                            Expr *Operand, SourceLocation RParenLoc);

  /// ActOnCXXUuidof - Parse __uuidof( something ).
  ExprResult ActOnCXXUuidof(SourceLocation OpLoc, SourceLocation LParenLoc,
                            bool isType, void *TyOrExpr,
                            SourceLocation RParenLoc);

  /// Handle a C++1z fold-expression: ( expr op ... op expr ).
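  // Editorial aside (not part of the original header): a user-level sketch of
  // the fold-expression forms the following callbacks parse and build:
  //
  //   template <typename... Ts>
  //   auto sum(Ts... ts) { return (ts + ...); }       // unary right fold
  //
  //   template <typename... Ts>
  //   auto sum0(Ts... ts) { return (0 + ... + ts); }  // binary left fold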
  ExprResult ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
                              tok::TokenKind Operator,
                              SourceLocation EllipsisLoc, Expr *RHS,
                              SourceLocation RParenLoc);
  ExprResult BuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
                              BinaryOperatorKind Operator,
                              SourceLocation EllipsisLoc, Expr *RHS,
                              SourceLocation RParenLoc,
                              Optional<unsigned> NumExpansions);
  ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc,
                                   BinaryOperatorKind Operator);

  /// ActOnCXXThis - Parse 'this' pointer.
  ExprResult ActOnCXXThis(SourceLocation loc);

  /// Build a CXXThisExpr and mark it referenced in the current context.
  Expr *BuildCXXThisExpr(SourceLocation Loc, QualType Type, bool IsImplicit);
  void MarkThisReferenced(CXXThisExpr *This);

  /// Try to retrieve the type of the 'this' pointer.
  ///
  /// \returns The type of 'this', if possible. Otherwise, returns a NULL type.
  QualType getCurrentThisType();

  /// When non-NULL, the C++ 'this' expression is allowed despite the
  /// current context not being a non-static member function. In such cases,
  /// this provides the type used for 'this'.
  QualType CXXThisTypeOverride;

  /// RAII object used to temporarily allow the C++ 'this' expression
  /// to be used, with the given qualifiers on the current class type.
  class CXXThisScopeRAII {
    Sema &S;
    QualType OldCXXThisTypeOverride;
    bool Enabled;

  public:
    /// Introduce a new scope where 'this' may be allowed (when enabled),
    /// using the given declaration (which is either a class template or a
    /// class), along with the qualifiers placed on '*this'.
    CXXThisScopeRAII(Sema &S, Decl *ContextDecl, Qualifiers CXXThisTypeQuals,
                     bool Enabled = true);

    ~CXXThisScopeRAII();
  };

  /// Make sure the value of 'this' is actually available in the current
  /// context, if it is a potentially evaluated context.
  ///
  /// \param Loc The location at which the capture of 'this' occurs.
  ///
  /// \param Explicit Whether 'this' is explicitly captured in a lambda
  /// capture list.
  ///
  /// \param FunctionScopeIndexToStopAt If non-null, it points to the index
  /// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
  /// This is useful when enclosing lambdas must speculatively capture
  /// 'this' that may or may not be used in certain specializations of
  /// a nested generic lambda (depending on whether the name resolves to
  /// a non-static member function or a static function).
  ///
  /// \return 'true' on failure, 'false' on success.
  bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false,
      bool BuildAndDiagnose = true,
      const unsigned *const FunctionScopeIndexToStopAt = nullptr,
      bool ByCopy = false);

  /// Determine whether the given type is the type of *this that is used
  /// outside of the body of a member function for a type that is currently
  /// being defined.
  bool isThisOutsideMemberFunctionBody(QualType BaseType);

  /// ActOnCXXBoolLiteral - Parse {true,false} literals.
  ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);

  /// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals.
  ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);

  ExprResult
  ActOnObjCAvailabilityCheckExpr(llvm::ArrayRef<AvailabilitySpec> AvailSpecs,
                                 SourceLocation AtLoc, SourceLocation RParen);

  /// ActOnCXXNullPtrLiteral - Parse 'nullptr'.
  ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc);

  /// ActOnCXXThrow - Parse throw expressions.
ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr); ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex, bool IsThrownVarInScope); bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E); /// ActOnCXXTypeConstructExpr - Parse construction of a specified type. /// Can be interpreted either as function-style casting ("int(x)") /// or class type construction ("ClassType(x,y,z)") /// or creation of a value-initialized type ("int()"). ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep, SourceLocation LParenOrBraceLoc, MultiExprArg Exprs, SourceLocation RParenOrBraceLoc, bool ListInitialization); ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type, SourceLocation LParenLoc, MultiExprArg Exprs, SourceLocation RParenLoc, bool ListInitialization); /// ActOnCXXNew - Parsed a C++ 'new' expression. ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, Declarator &D, Expr *Initializer); ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, QualType AllocType, TypeSourceInfo *AllocTypeInfo, Optional<Expr *> ArraySize, SourceRange DirectInitRange, Expr *Initializer); /// Determine whether \p FD is an aligned allocation or deallocation /// function that is unavailable. bool isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const; /// Produce diagnostics if \p FD is an aligned allocation or deallocation /// function that is unavailable. void diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD, SourceLocation Loc); bool CheckAllocatedType(QualType AllocType, SourceLocation Loc, SourceRange R); /// The scope in which to find allocation functions. enum AllocationFunctionScope { /// Only look for allocation functions in the global scope. AFS_Global, /// Only look for allocation functions in the scope of the /// allocated class. AFS_Class, /// Look for allocation functions in both the global scope /// and in the scope of the allocated class. AFS_Both }; /// Finds the overloads of operator new and delete that are appropriate /// for the allocation. 
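  // Editorial aside (not part of the original header): a sketch of why the
  // AllocationFunctionScope distinction above matters; a class-scope
  // operator new hides the global one for AFS_Class/AFS_Both lookups:
  //
  //   struct Pool {
  //     static void *operator new(std::size_t);   // found in class scope
  //   };
  //   Pool *p = new Pool;     // uses Pool::operator new
  //   Pool *q = ::new Pool;   // '::' forces the global scope (AFS_Global)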
bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range, AllocationFunctionScope NewScope, AllocationFunctionScope DeleteScope, QualType AllocType, bool IsArray, bool &PassAlignment, MultiExprArg PlaceArgs, FunctionDecl *&OperatorNew, FunctionDecl *&OperatorDelete, bool Diagnose = true); void DeclareGlobalNewDelete(); void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return, ArrayRef<QualType> Params); bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD, DeclarationName Name, FunctionDecl* &Operator, bool Diagnose = true); FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc, bool CanProvideSize, bool Overaligned, DeclarationName Name); FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc, CXXRecordDecl *RD); /// ActOnCXXDelete - Parsed a C++ 'delete' expression ExprResult ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal, bool ArrayForm, Expr *Operand); void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc, bool IsDelete, bool CallCanBeVirtual, bool WarnOnNonAbstractTypes, SourceLocation DtorLoc); ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen, Expr *Operand, SourceLocation RParen); ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand, SourceLocation RParen); /// Parsed one of the type trait support pseudo-functions. ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<ParsedType> Args, SourceLocation RParenLoc); ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<TypeSourceInfo *> Args, SourceLocation RParenLoc); /// ActOnArrayTypeTrait - Parsed one of the binary type trait support /// pseudo-functions. ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, ParsedType LhsTy, Expr *DimExpr, SourceLocation RParen); ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, TypeSourceInfo *TSInfo, Expr *DimExpr, SourceLocation RParen); /// ActOnExpressionTrait - Parsed one of the unary type trait support /// pseudo-functions. ExprResult ActOnExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult BuildExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult ActOnStartCXXMemberReference(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, ParsedType &ObjectType, bool &MayBePseudoDestructor); ExprResult BuildPseudoDestructorExpr(Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, const CXXScopeSpec &SS, TypeSourceInfo *ScopeType, SourceLocation CCLoc, SourceLocation TildeLoc, PseudoDestructorTypeStorage DestroyedType); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, UnqualifiedId &FirstTypeName, SourceLocation CCLoc, SourceLocation TildeLoc, UnqualifiedId &SecondTypeName); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, SourceLocation TildeLoc, const DeclSpec& DS); /// MaybeCreateExprWithCleanups - If the current full-expression /// requires any cleanups, surround it with a ExprWithCleanups node. /// Otherwise, just returns the passed-in expression. 
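  // Editorial aside (not part of the original header): the "cleanups" named
  // above are chiefly destructor calls for temporaries, which C++ runs at the
  // end of the enclosing full-expression. For instance:
  //
  //   std::string s = std::string("a") + "b";
  //   // the temporaries are destroyed when the full-expression ends, which
  //   // is what an ExprWithCleanups node records for later code generation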
Expr *MaybeCreateExprWithCleanups(Expr *SubExpr); Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt); ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr); MaterializeTemporaryExpr * CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary, bool BoundToLvalueReference); ExprResult ActOnFinishFullExpr(Expr *Expr, bool DiscardedValue) { return ActOnFinishFullExpr( Expr, Expr ? Expr->getExprLoc() : SourceLocation(), DiscardedValue); } ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC, bool DiscardedValue, bool IsConstexpr = false); StmtResult ActOnFinishFullStmt(Stmt *Stmt); // Marks SS invalid if it represents an incomplete type. bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC); DeclContext *computeDeclContext(QualType T); DeclContext *computeDeclContext(const CXXScopeSpec &SS, bool EnteringContext = false); bool isDependentScopeSpecifier(const CXXScopeSpec &SS); CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS); /// The parser has parsed a global nested-name-specifier '::'. /// /// \param CCLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS); /// The parser has parsed a '__super' nested-name-specifier. /// /// \param SuperLoc The location of the '__super' keyword. /// /// \param ColonColonLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc, SourceLocation ColonColonLoc, CXXScopeSpec &SS); bool isAcceptableNestedNameSpecifier(const NamedDecl *SD, bool *CanCorrect = nullptr); NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS); /// Keeps information about an identifier in a nested-name-spec. /// struct NestedNameSpecInfo { /// The type of the object, if we're parsing nested-name-specifier in /// a member access expression. ParsedType ObjectType; /// The identifier preceding the '::'. IdentifierInfo *Identifier; /// The location of the identifier. SourceLocation IdentifierLoc; /// The location of the '::'. SourceLocation CCLoc; /// Creates info object for the most typical case. NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc, SourceLocation ColonColonLoc, ParsedType ObjectType = ParsedType()) : ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) { } NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc, SourceLocation ColonColonLoc, QualType ObjectType) : ObjectType(ParsedType::make(ObjectType)), Identifier(II), IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) { } }; bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, NestedNameSpecInfo &IdInfo); bool BuildCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo, bool EnteringContext, CXXScopeSpec &SS, NamedDecl *ScopeLookupResult, bool ErrorRecoveryLookup, bool *IsCorrectedToColon = nullptr, bool OnlyNamespace = false); /// The parser has parsed a nested-name-specifier 'identifier::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param IdInfo Parser information about an identifier in the /// nested-name-spec. /// /// \param EnteringContext Whether we're entering the context nominated by /// this nested-name-specifier. 
/// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param ErrorRecoveryLookup If true, then this method is called to improve /// error recovery. In this case do not emit error message. /// /// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':' /// are allowed. The bool value pointed by this parameter is set to 'true' /// if the identifier is treated as if it was followed by ':', not '::'. /// /// \param OnlyNamespace If true, only considers namespaces in lookup. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo, bool EnteringContext, CXXScopeSpec &SS, bool ErrorRecoveryLookup = false, bool *IsCorrectedToColon = nullptr, bool OnlyNamespace = false); ExprResult ActOnDecltypeExpression(Expr *E); bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS, const DeclSpec &DS, SourceLocation ColonColonLoc); bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS, NestedNameSpecInfo &IdInfo, bool EnteringContext); /// The parser has parsed a nested-name-specifier /// 'template[opt] template-name < template-args >::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param TemplateKWLoc the location of the 'template' keyword, if any. /// \param TemplateName the template name. /// \param TemplateNameLoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). /// \param CCLoc The location of the '::'. /// /// \param EnteringContext Whether we're entering the context of the /// nested-name-specifier. /// /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateName, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, SourceLocation CCLoc, bool EnteringContext); /// Given a C++ nested-name-specifier, produce an annotation value /// that the parser can use later to reconstruct the given /// nested-name-specifier. /// /// \param SS A nested-name-specifier. /// /// \returns A pointer containing all of the information in the /// nested-name-specifier \p SS. void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS); /// Given an annotation pointer for a nested-name-specifier, restore /// the nested-name-specifier structure. /// /// \param Annotation The annotation pointer, produced by /// \c SaveNestedNameSpecifierAnnotation(). /// /// \param AnnotationRange The source range corresponding to the annotation. /// /// \param SS The nested-name-specifier that will be updated with the contents /// of the annotation pointer. 
void RestoreNestedNameSpecifierAnnotation(void *Annotation, SourceRange AnnotationRange, CXXScopeSpec &SS); bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global /// scope or nested-name-specifier) is parsed, part of a declarator-id. /// After this method is called, according to [C++ 3.4.3p3], names should be /// looked up in the declarator-id's scope, until the declarator is parsed and /// ActOnCXXExitDeclaratorScope is called. /// The 'SS' should be a non-empty valid CXXScopeSpec. bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS); /// ActOnCXXExitDeclaratorScope - Called when a declarator that previously /// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same /// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well. /// Used to indicate that names should revert to being looked up in the /// defining scope. void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an /// initializer for the declaration 'Dcl'. /// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a /// static data member of class X, names should be looked up in the scope of /// class X. void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl); /// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an /// initializer for the declaration 'Dcl'. void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl); /// Create a new lambda closure type. CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange, TypeSourceInfo *Info, bool KnownDependent, LambdaCaptureDefault CaptureDefault); /// Start the definition of a lambda expression. CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class, SourceRange IntroducerRange, TypeSourceInfo *MethodType, SourceLocation EndLoc, ArrayRef<ParmVarDecl *> Params, ConstexprSpecKind ConstexprKind); /// Number lambda for linkage purposes if necessary. void handleLambdaNumbering( CXXRecordDecl *Class, CXXMethodDecl *Method, Optional<std::tuple<unsigned, bool, Decl *>> Mangling = None); /// Endow the lambda scope info with the relevant properties. void buildLambdaScope(sema::LambdaScopeInfo *LSI, CXXMethodDecl *CallOperator, SourceRange IntroducerRange, LambdaCaptureDefault CaptureDefault, SourceLocation CaptureDefaultLoc, bool ExplicitParams, bool ExplicitResultType, bool Mutable); /// Perform initialization analysis of the init-capture and perform /// any implicit conversions such as an lvalue-to-rvalue conversion if /// not being used to initialize a reference. ParsedType actOnLambdaInitCaptureInitialization( SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc, IdentifierInfo *Id, LambdaCaptureInitKind InitKind, Expr *&Init) { return ParsedType::make(buildLambdaInitCaptureInitialization( Loc, ByRef, EllipsisLoc, None, Id, InitKind != LambdaCaptureInitKind::CopyInit, Init)); } QualType buildLambdaInitCaptureInitialization( SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions, IdentifierInfo *Id, bool DirectInit, Expr *&Init); /// Create a dummy variable within the declcontext of the lambda's /// call operator, for name lookup purposes for a lambda init capture. /// /// CodeGen handles emission of lambda captures, ignoring these dummy /// variables appropriately. 
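  // Editorial aside (not part of the original header): the init-captures
  // these helpers process are the C++14 form in which the capture introduces
  // a new variable rather than naming an enclosing one:
  //
  //   int step = 2;
  //   auto next = [n = 0, &s = step]() mutable { return n += s; };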
VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc, QualType InitCaptureType, SourceLocation EllipsisLoc, IdentifierInfo *Id, unsigned InitStyle, Expr *Init); /// Add an init-capture to a lambda scope. void addInitCapture(sema::LambdaScopeInfo *LSI, VarDecl *Var); /// Note that we have finished the explicit captures for the /// given lambda. void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI); /// \brief This is called after parsing the explicit template parameter list /// on a lambda (if it exists) in C++2a. void ActOnLambdaExplicitTemplateParameterList(SourceLocation LAngleLoc, ArrayRef<NamedDecl *> TParams, SourceLocation RAngleLoc); /// Introduce the lambda parameters into scope. void addLambdaParameters( ArrayRef<LambdaIntroducer::LambdaCapture> Captures, CXXMethodDecl *CallOperator, Scope *CurScope); /// Deduce a block or lambda's return type based on the return /// statements present in the body. void deduceClosureReturnType(sema::CapturingScopeInfo &CSI); /// ActOnStartOfLambdaDefinition - This is called just before we start /// parsing the body of a lambda; it analyzes the explicit captures and /// arguments, and sets up various data-structures for the body of the /// lambda. void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro, Declarator &ParamInfo, Scope *CurScope); /// ActOnLambdaError - If there is an error parsing a lambda, this callback /// is invoked to pop the information about the lambda. void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope, bool IsInstantiation = false); /// ActOnLambdaExpr - This is called when the body of a lambda expression /// was successfully completed. ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body, Scope *CurScope); /// Does copying/destroying the captured variable have side effects? bool CaptureHasSideEffects(const sema::Capture &From); /// Diagnose if an explicit lambda capture is unused. Returns true if a /// diagnostic is emitted. bool DiagnoseUnusedLambdaCapture(SourceRange CaptureRange, const sema::Capture &From); /// Build a FieldDecl suitable to hold the given capture. FieldDecl *BuildCaptureField(RecordDecl *RD, const sema::Capture &Capture); /// Initialize the given capture with a suitable expression. ExprResult BuildCaptureInit(const sema::Capture &Capture, SourceLocation ImplicitCaptureLoc, bool IsOpenMPMapping = false); /// Complete a lambda-expression having processed and attached the /// lambda body. ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc, sema::LambdaScopeInfo *LSI); /// Get the return type to use for a lambda's conversion function(s) to /// function pointer type, given the type of the call operator. QualType getLambdaConversionFunctionResultType(const FunctionProtoType *CallOpType); /// Define the "body" of the conversion from a lambda object to a /// function pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. void DefineImplicitLambdaToFunctionPointerConversion( SourceLocation CurrentLoc, CXXConversionDecl *Conv); /// Define the "body" of the conversion from a lambda object to a /// block pointer. 
  ///
  /// This routine doesn't actually define a sensible body; rather, it fills
  /// in the initialization expression needed to copy the lambda object into
  /// the block, and IR generation actually generates the real body of the
  /// block pointer conversion.
  void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc,
                                                    CXXConversionDecl *Conv);

  ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation,
                                           SourceLocation ConvLocation,
                                           CXXConversionDecl *Conv,
                                           Expr *Src);

  /// Check whether the given expression is a valid constraint expression.
  /// A diagnostic is emitted if it is not, and false is returned.
  bool CheckConstraintExpression(Expr *CE);

  /// \brief Check whether the given list of constraint expressions are
  /// satisfied (as if in a 'conjunction') given template arguments.
  /// \param ConstraintExprs a list of constraint expressions, treated as if
  /// they were 'AND'ed together.
  /// \param TemplateArgs the list of template arguments to substitute into the
  /// constraint expression.
  /// \param TemplateIDRange The source range of the template id that
  /// caused the constraints check.
  /// \param Satisfaction if true is returned, will contain details of the
  /// satisfaction, with enough information to diagnose an unsatisfied
  /// expression.
  /// \returns true if an error occurred and satisfaction could not be checked,
  /// false otherwise.
  bool CheckConstraintSatisfaction(TemplateDecl *Template,
                                   ArrayRef<const Expr *> ConstraintExprs,
                                   ArrayRef<TemplateArgument> TemplateArgs,
                                   SourceRange TemplateIDRange,
                                   ConstraintSatisfaction &Satisfaction);

  bool CheckConstraintSatisfaction(ClassTemplatePartialSpecializationDecl *TD,
                                   ArrayRef<const Expr *> ConstraintExprs,
                                   ArrayRef<TemplateArgument> TemplateArgs,
                                   SourceRange TemplateIDRange,
                                   ConstraintSatisfaction &Satisfaction);

  bool CheckConstraintSatisfaction(VarTemplatePartialSpecializationDecl *TD,
                                   ArrayRef<const Expr *> ConstraintExprs,
                                   ArrayRef<TemplateArgument> TemplateArgs,
                                   SourceRange TemplateIDRange,
                                   ConstraintSatisfaction &Satisfaction);

  /// \brief Check whether the given non-dependent constraint expression is
  /// satisfied. Returns false and updates Satisfaction with the satisfaction
  /// verdict if successful, emits a diagnostic and returns true if an error
  /// occurred and satisfaction could not be determined.
  ///
  /// \returns true if an error occurred, false otherwise.
  bool CheckConstraintSatisfaction(const Expr *ConstraintExpr,
                                   ConstraintSatisfaction &Satisfaction);

  /// Check that the associated constraints of a template declaration match the
  /// associated constraints of an older declaration of which it is a
  /// redeclaration.
  bool CheckRedeclarationConstraintMatch(TemplateParameterList *Old,
                                         TemplateParameterList *New);

  /// \brief Ensure that the given template arguments satisfy the constraints
  /// associated with the given template, emitting a diagnostic if they do not.
  ///
  /// \param Template The template to which the template arguments are being
  /// provided.
  ///
  /// \param TemplateArgs The converted, canonicalized template arguments.
  ///
  /// \param TemplateIDRange The source range of the template id that
  /// caused the constraints check.
  ///
  /// \returns true if the constraints are not satisfied or could not be
  /// checked for satisfaction, false if the constraints are satisfied.
  bool EnsureTemplateArgumentListConstraints(
      TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs,
      SourceRange TemplateIDRange);

  /// \brief Emit diagnostics explaining why a constraint expression was deemed
  /// unsatisfied.
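  // Editorial aside (not part of the original header): a user-level sketch of
  // a constraint expression these callbacks check for satisfaction:
  //
  //   template <typename T>
  //     requires (sizeof(T) > 1)   // constraint expression
  //   void f(T);
  //
  //   f('x');  // unsatisfied: sizeof(char) == 1; the failure is explained
  //            // via DiagnoseUnsatisfiedConstraint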
void DiagnoseUnsatisfiedConstraint(const ConstraintSatisfaction& Satisfaction); /// \brief Emit diagnostics explaining why a constraint expression was deemed /// unsatisfied. void DiagnoseUnsatisfiedConstraint(const ASTConstraintSatisfaction& Satisfaction); /// \brief Emit diagnostics explaining why a constraint expression was deemed /// unsatisfied because it was ill-formed. void DiagnoseUnsatisfiedIllFormedConstraint(SourceLocation DiagnosticLocation, StringRef Diagnostic); // ParseObjCStringLiteral - Parse Objective-C string literals. ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs, ArrayRef<Expr *> Strings); ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S); /// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the /// numeric literal expression. Type of the expression will be "NSNumber *" /// or "id" if NSNumber is unavailable. ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number); ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc, bool Value); ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements); /// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the /// '@' prefixed parenthesized expression. The type of the expression will /// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type /// of ValueType, which is allowed to be a built-in numeric type, "char *", /// "const char *" or C structure with attribute 'objc_boxable'. ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr); ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr, Expr *IndexExpr, ObjCMethodDecl *getterMethod, ObjCMethodDecl *setterMethod); ExprResult BuildObjCDictionaryLiteral(SourceRange SR, MutableArrayRef<ObjCDictionaryElement> Elements); ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc, TypeSourceInfo *EncodedTypeInfo, SourceLocation RParenLoc); ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl, CXXConversionDecl *Method, bool HadMultipleCandidates); ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc, SourceLocation EncodeLoc, SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc); /// ParseObjCSelectorExpression - Build selector expression for \@selector ExprResult ParseObjCSelectorExpression(Selector Sel, SourceLocation AtLoc, SourceLocation SelLoc, SourceLocation LParenLoc, SourceLocation RParenLoc, bool WarnMultipleSelectors); /// ParseObjCProtocolExpression - Build protocol expression for \@protocol ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName, SourceLocation AtLoc, SourceLocation ProtoLoc, SourceLocation LParenLoc, SourceLocation ProtoIdLoc, SourceLocation RParenLoc); //===--------------------------------------------------------------------===// // C++ Declarations // Decl *ActOnStartLinkageSpecification(Scope *S, SourceLocation ExternLoc, Expr *LangStr, SourceLocation LBraceLoc); Decl *ActOnFinishLinkageSpecification(Scope *S, Decl *LinkageSpec, SourceLocation RBraceLoc); //===--------------------------------------------------------------------===// // C++ Classes // CXXRecordDecl *getCurrentClass(Scope *S, const CXXScopeSpec *SS); bool isCurrentClassName(const IdentifierInfo &II, Scope *S, const CXXScopeSpec *SS = nullptr); bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS); bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc, SourceLocation ColonLoc, const ParsedAttributesView &Attrs); NamedDecl *ActOnCXXMemberDeclarator(Scope *S, 
AccessSpecifier AS, Declarator &D, MultiTemplateParamsArg TemplateParameterLists, Expr *BitfieldWidth, const VirtSpecifiers &VS, InClassInitStyle InitStyle); void ActOnStartCXXInClassMemberInitializer(); void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl, SourceLocation EqualLoc, Expr *Init); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, SourceLocation LParenLoc, ArrayRef<Expr *> Args, SourceLocation RParenLoc, SourceLocation EllipsisLoc); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *InitList, SourceLocation EllipsisLoc); MemInitResult BuildMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *Init, SourceLocation EllipsisLoc); MemInitResult BuildMemberInitializer(ValueDecl *Member, Expr *Init, SourceLocation IdLoc); MemInitResult BuildBaseInitializer(QualType BaseType, TypeSourceInfo *BaseTInfo, Expr *Init, CXXRecordDecl *ClassDecl, SourceLocation EllipsisLoc); MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo, Expr *Init, CXXRecordDecl *ClassDecl); bool SetDelegatingInitializer(CXXConstructorDecl *Constructor, CXXCtorInitializer *Initializer); bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors, ArrayRef<CXXCtorInitializer *> Initializers = None); void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation); /// MarkBaseAndMemberDestructorsReferenced - Given a record decl, /// mark all the non-trivial destructors of its members and bases as /// referenced. void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc, CXXRecordDecl *Record); /// The list of classes whose vtables have been used within /// this translation unit, and the source locations at which the /// first use occurred. typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse; /// The list of vtables that are required but have not yet been /// materialized. SmallVector<VTableUse, 16> VTableUses; /// The set of classes whose vtables have been used within /// this translation unit, and a bit that will be true if the vtable is /// required to be emitted (otherwise, it should be emitted only if needed /// by code generation). llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed; /// Load any externally-stored vtable uses. void LoadExternalVTableUses(); /// Note that the vtable for the given class was used at the /// given location. void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class, bool DefinitionRequired = false); /// Mark the exception specifications of all virtual member functions /// in the given class as needed. void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc, const CXXRecordDecl *RD); /// MarkVirtualMembersReferenced - Will mark all members of the given /// CXXRecordDecl referenced. void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD, bool ConstexprOnly = false); /// Define all of the vtables that have been used in this /// translation unit and reference any virtual members used by those /// vtables. /// /// \returns true if any work was done, false otherwise. 
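  // Editorial aside (not part of the original header): a vtable becomes
  // "used" when, for example, an object of a dynamic class is constructed,
  // since the constructor must install the vtable pointer:
  //
  //   struct B { virtual void f(); };
  //   void g() { B b; }   // constructing 'b' marks B's vtable as used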
  bool DefineUsedVTables();

  void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl);

  void ActOnMemInitializers(Decl *ConstructorDecl, SourceLocation ColonLoc,
                            ArrayRef<CXXCtorInitializer*> MemInits,
                            bool AnyErrors);

  /// Check class-level dllimport/dllexport attribute. The caller must
  /// ensure that referenceDLLExportedClassMethods is called at some point
  /// later when all outer classes of Class are complete.
  void checkClassLevelDLLAttribute(CXXRecordDecl *Class);
  void checkClassLevelCodeSegAttribute(CXXRecordDecl *Class);

  void referenceDLLExportedClassMethods();

  void propagateDLLAttrToBaseClassTemplate(
      CXXRecordDecl *Class, Attr *ClassAttr,
      ClassTemplateSpecializationDecl *BaseTemplateSpec,
      SourceLocation BaseLoc);

  /// Add gsl::Pointer attribute to std::container::iterator
  /// \param ND The declaration that introduces the name
  ///   std::container::iterator.
  /// \param UnderlyingRecord The record named by ND.
  void inferGslPointerAttribute(NamedDecl *ND, CXXRecordDecl *UnderlyingRecord);

  /// Add [[gsl::Owner]] and [[gsl::Pointer]] attributes for std:: types.
  void inferGslOwnerPointerAttribute(CXXRecordDecl *Record);

  /// Add [[gsl::Pointer]] attributes for std:: types.
  void inferGslPointerAttribute(TypedefNameDecl *TD);

  void CheckCompletedCXXClass(Scope *S, CXXRecordDecl *Record);

  /// Check that the C++ class annotated with "trivial_abi" satisfies all the
  /// conditions that are needed for the attribute to have an effect.
  void checkIllFormedTrivialABIStruct(CXXRecordDecl &RD);

  void ActOnFinishCXXMemberSpecification(Scope *S, SourceLocation RLoc,
                                         Decl *TagDecl, SourceLocation LBrac,
                                         SourceLocation RBrac,
                                         const ParsedAttributesView &AttrList);
  void ActOnFinishCXXMemberDecls();
  void ActOnFinishCXXNonNestedClass();

  void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param);
  unsigned ActOnReenterTemplateScope(Scope *S, Decl *Template);
  void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record);
  void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
  void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param);
  void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record);
  void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
  void ActOnFinishDelayedMemberInitializers(Decl *Record);
  void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD,
                                CachedTokens &Toks);
  void UnmarkAsLateParsedTemplate(FunctionDecl *FD);
  bool IsInsideALocalClassWithinATemplateFunction();

  Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc,
                                     Expr *AssertExpr, Expr *AssertMessageExpr,
                                     SourceLocation RParenLoc);
  Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc,
                                     Expr *AssertExpr,
                                     StringLiteral *AssertMessageExpr,
                                     SourceLocation RParenLoc, bool Failed);

  FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart,
                                  SourceLocation FriendLoc,
                                  TypeSourceInfo *TSInfo);
  Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS,
                            MultiTemplateParamsArg TemplateParams);
  NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D,
                                     MultiTemplateParamsArg TemplateParams);

  QualType CheckConstructorDeclarator(Declarator &D, QualType R,
                                      StorageClass& SC);
  void CheckConstructor(CXXConstructorDecl *Constructor);
  QualType CheckDestructorDeclarator(Declarator &D, QualType R,
                                     StorageClass& SC);
  bool CheckDestructor(CXXDestructorDecl *Destructor);
  void CheckConversionDeclarator(Declarator &D, QualType &R,
                                 StorageClass& SC);
  Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion);
  void CheckDeductionGuideDeclarator(Declarator &D, QualType &R,
                                     StorageClass &SC);
  void
CheckDeductionGuideTemplate(FunctionTemplateDecl *TD); void CheckExplicitlyDefaultedFunction(Scope *S, FunctionDecl *MD); bool CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM); void CheckDelayedMemberExceptionSpecs(); bool CheckExplicitlyDefaultedComparison(Scope *S, FunctionDecl *MD, DefaultedComparisonKind DCK); void DeclareImplicitEqualityComparison(CXXRecordDecl *RD, FunctionDecl *Spaceship); void DefineDefaultedComparison(SourceLocation Loc, FunctionDecl *FD, DefaultedComparisonKind DCK); //===--------------------------------------------------------------------===// // C++ Derived Classes // /// ActOnBaseSpecifier - Parsed a base specifier CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class, SourceRange SpecifierRange, bool Virtual, AccessSpecifier Access, TypeSourceInfo *TInfo, SourceLocation EllipsisLoc); BaseResult ActOnBaseSpecifier(Decl *classdecl, SourceRange SpecifierRange, ParsedAttributes &Attrs, bool Virtual, AccessSpecifier Access, ParsedType basetype, SourceLocation BaseLoc, SourceLocation EllipsisLoc); bool AttachBaseSpecifiers(CXXRecordDecl *Class, MutableArrayRef<CXXBaseSpecifier *> Bases); void ActOnBaseSpecifiers(Decl *ClassDecl, MutableArrayRef<CXXBaseSpecifier *> Bases); bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base); bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base, CXXBasePaths &Paths); // FIXME: I don't like this name. void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, SourceLocation Loc, SourceRange Range, CXXCastPath *BasePath = nullptr, bool IgnoreAccess = false); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, unsigned InaccessibleBaseID, unsigned AmbigiousBaseConvID, SourceLocation Loc, SourceRange Range, DeclarationName Name, CXXCastPath *BasePath, bool IgnoreAccess = false); std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths); bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionReturnType - Checks whether the return types are /// covariant, according to C++ [class.virtual]p5. bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionExceptionSpec - Checks whether the exception /// spec is a subset of base spec. bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New, const CXXMethodDecl *Old); bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange); /// CheckOverrideControl - Check C++11 override control semantics. void CheckOverrideControl(NamedDecl *D); /// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was /// not used in the declaration of an overriding method. void DiagnoseAbsenceOfOverrideControl(NamedDecl *D); /// CheckForFunctionMarkedFinal - Checks whether a virtual member function /// overrides a virtual member function marked 'final', according to /// C++11 [class.virtual]p4. 
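/// For example:
/// \code
/// struct B { virtual void f() final; };
/// struct D : B { void f(); }; // error: overrides a function marked 'final'
/// \endcode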
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New, const CXXMethodDecl *Old); //===--------------------------------------------------------------------===// // C++ Access Control // enum AccessResult { AR_accessible, AR_inaccessible, AR_dependent, AR_delayed }; bool SetMemberAccessSpecifier(NamedDecl *MemberDecl, NamedDecl *PrevMemberDecl, AccessSpecifier LexicalAS); AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E, DeclAccessPair FoundDecl); AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E, DeclAccessPair FoundDecl); AccessResult CheckAllocationAccess(SourceLocation OperatorLoc, SourceRange PlacementRange, CXXRecordDecl *NamingClass, DeclAccessPair FoundDecl, bool Diagnose = true); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, DeclAccessPair FoundDecl, const InitializedEntity &Entity, bool IsCopyBindingRefToTemp = false); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, DeclAccessPair FoundDecl, const InitializedEntity &Entity, const PartialDiagnostic &PDiag); AccessResult CheckDestructorAccess(SourceLocation Loc, CXXDestructorDecl *Dtor, const PartialDiagnostic &PDiag, QualType objectType = QualType()); AccessResult CheckFriendAccess(NamedDecl *D); AccessResult CheckMemberAccess(SourceLocation UseLoc, CXXRecordDecl *NamingClass, DeclAccessPair Found); AccessResult CheckStructuredBindingMemberAccess(SourceLocation UseLoc, CXXRecordDecl *DecomposedClass, DeclAccessPair Field); AccessResult CheckMemberOperatorAccess(SourceLocation Loc, Expr *ObjectExpr, Expr *ArgExpr, DeclAccessPair FoundDecl); AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr, DeclAccessPair FoundDecl); AccessResult CheckBaseClassAccess(SourceLocation AccessLoc, QualType Base, QualType Derived, const CXXBasePath &Path, unsigned DiagID, bool ForceCheck = false, bool ForceUnprivileged = false); void CheckLookupAccess(const LookupResult &R); bool IsSimplyAccessible(NamedDecl *Decl, CXXRecordDecl *NamingClass, QualType BaseType); bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass, DeclAccessPair Found, QualType ObjectType, SourceLocation Loc, const PartialDiagnostic &Diag); bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass, DeclAccessPair Found, QualType ObjectType) { return isMemberAccessibleForDeletion(NamingClass, Found, ObjectType, SourceLocation(), PDiag()); } void HandleDependentAccessCheck(const DependentDiagnostic &DD, const MultiLevelTemplateArgumentList &TemplateArgs); void PerformDependentDiagnostics(const DeclContext *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx); /// When true, access checking violations are treated as SFINAE /// failures rather than hard errors. bool AccessCheckingSFINAE; enum AbstractDiagSelID { AbstractNone = -1, AbstractReturnType, AbstractParamType, AbstractVariableType, AbstractFieldType, AbstractIvarType, AbstractSynthesizedIvarType, AbstractArrayType }; bool isAbstractType(SourceLocation Loc, QualType T); bool RequireNonAbstractType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); template <typename... 
Ts> bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireNonAbstractType(Loc, T, Diagnoser); } void DiagnoseAbstractType(const CXXRecordDecl *RD); //===--------------------------------------------------------------------===// // C++ Overloaded Operators [C++ 13.5] // bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl); bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl); //===--------------------------------------------------------------------===// // C++ Templates [C++ 14] // void FilterAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true, bool AllowDependent = true); bool hasAnyAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true, bool AllowDependent = true, bool AllowNonTemplateFunctions = false); /// Try to interpret the lookup result D as a template-name. /// /// \param D A declaration found by name lookup. /// \param AllowFunctionTemplates Whether function templates should be /// considered valid results. /// \param AllowDependent Whether unresolved using declarations (that might /// name templates) should be considered valid results. NamedDecl *getAsTemplateNameDecl(NamedDecl *D, bool AllowFunctionTemplates = true, bool AllowDependent = true); enum class AssumedTemplateKind { /// This is not assumed to be a template name. None, /// This is assumed to be a template name because lookup found nothing. FoundNothing, /// This is assumed to be a template name because lookup found one or more /// functions (but no function templates). FoundFunctions, }; bool LookupTemplateName(LookupResult &R, Scope *S, CXXScopeSpec &SS, QualType ObjectType, bool EnteringContext, bool &MemberOfUnknownSpecialization, SourceLocation TemplateKWLoc = SourceLocation(), AssumedTemplateKind *ATK = nullptr); TemplateNameKind isTemplateName(Scope *S, CXXScopeSpec &SS, bool hasTemplateKeyword, const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool &MemberOfUnknownSpecialization); /// Try to resolve an undeclared template name as a type template. /// /// Sets II to the identifier corresponding to the template name, and updates /// Name to a corresponding (typo-corrected) type template name and TNK to /// the corresponding kind, if possible. void ActOnUndeclaredTypeTemplateName(Scope *S, TemplateTy &Name, TemplateNameKind &TNK, SourceLocation NameLoc, IdentifierInfo *&II); bool resolveAssumedTemplateNameAsType(Scope *S, TemplateName &Name, SourceLocation NameLoc, bool Diagnose = true); /// Determine whether a particular identifier might be the name in a C++1z /// deduction-guide declaration. 
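/// For example, \c S names a deduction guide in the second declaration:
/// \code
/// template<typename T> struct S { S(T); };
/// S(const char *) -> S<std::string>; // deduction-guide declaration
/// \endcode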
bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name, SourceLocation NameLoc, ParsedTemplateTy *Template = nullptr); bool DiagnoseUnknownTemplateName(const IdentifierInfo &II, SourceLocation IILoc, Scope *S, const CXXScopeSpec *SS, TemplateTy &SuggestedTemplate, TemplateNameKind &SuggestedKind); bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation, NamedDecl *Instantiation, bool InstantiatedFromMember, const NamedDecl *Pattern, const NamedDecl *PatternDef, TemplateSpecializationKind TSK, bool Complain = true); void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl); TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl); NamedDecl *ActOnTypeParameter(Scope *S, bool Typename, SourceLocation EllipsisLoc, SourceLocation KeyLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedType DefaultArg); QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI, SourceLocation Loc); QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc); NamedDecl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D, unsigned Depth, unsigned Position, SourceLocation EqualLoc, Expr *DefaultArg); NamedDecl *ActOnTemplateTemplateParameter(Scope *S, SourceLocation TmpLoc, TemplateParameterList *Params, SourceLocation EllipsisLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedTemplateArgument DefaultArg); TemplateParameterList * ActOnTemplateParameterList(unsigned Depth, SourceLocation ExportLoc, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ArrayRef<NamedDecl *> Params, SourceLocation RAngleLoc, Expr *RequiresClause); /// The context in which we are checking a template parameter list. 
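/// For example, one rule that depends on this context: a friend class
/// template declaration cannot carry default template arguments:
/// \code
/// struct X {
///   template<typename T = int> friend struct F; // error: default argument
/// };                                            // on a friend template
/// \endcode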
enum TemplateParamListContext { TPC_ClassTemplate, TPC_VarTemplate, TPC_FunctionTemplate, TPC_ClassTemplateMember, TPC_FriendClassTemplate, TPC_FriendFunctionTemplate, TPC_FriendFunctionTemplateDefinition, TPC_TypeAliasTemplate }; bool CheckTemplateParameterList(TemplateParameterList *NewParams, TemplateParameterList *OldParams, TemplateParamListContext TPC, SkipBodyInfo *SkipBody = nullptr); TemplateParameterList *MatchTemplateParametersToScopeSpecifier( SourceLocation DeclStartLoc, SourceLocation DeclLoc, const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId, ArrayRef<TemplateParameterList *> ParamLists, bool IsFriend, bool &IsMemberSpecialization, bool &Invalid); DeclResult CheckClassTemplate( Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, TemplateParameterList *TemplateParams, AccessSpecifier AS, SourceLocation ModulePrivateLoc, SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists, TemplateParameterList **OuterTemplateParamLists, SkipBodyInfo *SkipBody = nullptr); TemplateArgumentLoc getTrivialTemplateArgumentLoc(const TemplateArgument &Arg, QualType NTTPType, SourceLocation Loc); void translateTemplateArguments(const ASTTemplateArgsPtr &In, TemplateArgumentListInfo &Out); ParsedTemplateArgument ActOnTemplateTypeArgument(TypeResult ParsedType); void NoteAllFoundTemplates(TemplateName Name); QualType CheckTemplateIdType(TemplateName Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs); TypeResult ActOnTemplateIdType(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy Template, IdentifierInfo *TemplateII, SourceLocation TemplateIILoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, bool IsCtorOrDtorName = false, bool IsClassName = false); /// Parsed an elaborated-type-specifier that refers to a template-id, /// such as \c class T::template apply<U>. 
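/// For example:
/// \code
/// template<typename T, typename U> void g() {
///   class T::template apply<U> *p; // tag keyword + dependent template-id
/// }
/// \endcode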
TypeResult ActOnTagTemplateIdType(TagUseKind TUK, TypeSpecifierType TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateD, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgsIn, SourceLocation RAngleLoc); DeclResult ActOnVarTemplateSpecialization( Scope *S, Declarator &D, TypeSourceInfo *DI, SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams, StorageClass SC, bool IsPartialSpecialization); DeclResult CheckVarTemplateId(VarTemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation TemplateNameLoc, const TemplateArgumentListInfo &TemplateArgs); ExprResult CheckVarTemplateId(const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, VarTemplateDecl *Template, SourceLocation TemplateLoc, const TemplateArgumentListInfo *TemplateArgs); ExprResult CheckConceptTemplateId(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, SourceLocation ConceptNameLoc, NamedDecl *FoundDecl, ConceptDecl *NamedConcept, const TemplateArgumentListInfo *TemplateArgs); void diagnoseMissingTemplateArguments(TemplateName Name, SourceLocation Loc); ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, bool RequiresADL, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); TemplateNameKind ActOnDependentTemplateName( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool AllowInjectedClassName = false); DeclResult ActOnClassTemplateSpecialization( Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, SourceLocation ModulePrivateLoc, TemplateIdAnnotation &TemplateId, const ParsedAttributesView &Attr, MultiTemplateParamsArg TemplateParameterLists, SkipBodyInfo *SkipBody = nullptr); bool CheckTemplatePartialSpecializationArgs(SourceLocation Loc, TemplateDecl *PrimaryTemplate, unsigned NumExplicitArgs, ArrayRef<TemplateArgument> Args); void CheckTemplatePartialSpecialization( ClassTemplatePartialSpecializationDecl *Partial); void CheckTemplatePartialSpecialization( VarTemplatePartialSpecializationDecl *Partial); Decl *ActOnTemplateDeclarator(Scope *S, MultiTemplateParamsArg TemplateParameterLists, Declarator &D); bool CheckSpecializationInstantiationRedecl(SourceLocation NewLoc, TemplateSpecializationKind NewTSK, NamedDecl *PrevDecl, TemplateSpecializationKind PrevTSK, SourceLocation PrevPtOfInstantiation, bool &SuppressNew); bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD, const TemplateArgumentListInfo &ExplicitTemplateArgs, LookupResult &Previous); bool CheckFunctionTemplateSpecialization( FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs, LookupResult &Previous, bool QualifiedFriend = false); bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous); void CompleteMemberSpecialization(NamedDecl *Member, LookupResult &Previous); DeclResult ActOnExplicitInstantiation( Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS, TemplateTy Template, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, const ParsedAttributesView &Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, 
SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, Declarator &D); TemplateArgumentLoc SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, Decl *Param, SmallVectorImpl<TemplateArgument> &Converted, bool &HasDefaultArg); /// Specifies the context in which a particular template /// argument is being checked. enum CheckTemplateArgumentKind { /// The template argument was specified in the code or was /// instantiated with some deduced template arguments. CTAK_Specified, /// The template argument was deduced via template argument /// deduction. CTAK_Deduced, /// The template argument was deduced from an array bound /// via template argument deduction. CTAK_DeducedFromArrayBound }; bool CheckTemplateArgument(NamedDecl *Param, TemplateArgumentLoc &Arg, NamedDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, unsigned ArgumentPackIndex, SmallVectorImpl<TemplateArgument> &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified); /// Check that the given template arguments can be provided to /// the given template, converting the arguments along the way. /// /// \param Template The template to which the template arguments are being /// provided. /// /// \param TemplateLoc The location of the template name in the source. /// /// \param TemplateArgs The list of template arguments. If the template is /// a template template parameter, this function may extend the set of /// template arguments to also include substituted, defaulted template /// arguments. /// /// \param PartialTemplateArgs True if the list of template arguments is /// intentionally partial, e.g., because we're checking just the initial /// set of template arguments. /// /// \param Converted Will receive the converted, canonicalized template /// arguments. /// /// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to /// contain the converted forms of the template arguments as written. /// Otherwise, \p TemplateArgs will not be modified. /// /// \param ConstraintsNotSatisfied If provided, and an error occurred, will /// receive true if the cause for the error is the associated constraints of /// the template not being satisfied by the template arguments. /// /// \returns true if an error occurred, false otherwise.
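/// A minimal call-site sketch (here \c S, \c TD, \c Loc, and \c ArgsInfo are
/// assumed to be a Sema instance, the template, a source location, and the
/// parsed argument list, respectively):
/// \code
/// SmallVector<TemplateArgument, 4> Converted;
/// if (S.CheckTemplateArgumentList(TD, Loc, ArgsInfo,
///                                 /*PartialTemplateArgs=*/false, Converted))
///   return; // the arguments were not valid for TD
/// \endcode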
bool CheckTemplateArgumentList(TemplateDecl *Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs, bool PartialTemplateArgs, SmallVectorImpl<TemplateArgument> &Converted, bool UpdateArgsWithConversions = true, bool *ConstraintsNotSatisfied = nullptr); bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param, TemplateArgumentLoc &Arg, SmallVectorImpl<TemplateArgument> &Converted); bool CheckTemplateArgument(TemplateTypeParmDecl *Param, TypeSourceInfo *Arg); ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param, QualType InstantiatedParamType, Expr *Arg, TemplateArgument &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified); bool CheckTemplateTemplateArgument(TemplateParameterList *Params, TemplateArgumentLoc &Arg); ExprResult BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg, QualType ParamType, SourceLocation Loc); ExprResult BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg, SourceLocation Loc); /// Enumeration describing how template parameter lists are compared /// for equality. enum TemplateParameterListEqualKind { /// We are matching the template parameter lists of two templates /// that might be redeclarations. /// /// \code /// template<typename T> struct X; /// template<typename T> struct X; /// \endcode TPL_TemplateMatch, /// We are matching the template parameter lists of two template /// template parameters as part of matching the template parameter lists /// of two templates that might be redeclarations. /// /// \code /// template<template<int I> class TT> struct X; /// template<template<int Value> class Other> struct X; /// \endcode TPL_TemplateTemplateParmMatch, /// We are matching the template parameter lists of a template /// template argument against the template parameter lists of a template /// template parameter. /// /// \code /// template<template<int Value> class Metafun> struct X; /// template<int Value> struct integer_c; /// X<integer_c> xic; /// \endcode TPL_TemplateTemplateArgumentMatch }; bool TemplateParameterListsAreEqual(TemplateParameterList *New, TemplateParameterList *Old, bool Complain, TemplateParameterListEqualKind Kind, SourceLocation TemplateArgLoc = SourceLocation()); bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams); /// Called when the parser has parsed a C++ typename /// specifier, e.g., "typename T::type". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param II the identifier we're retrieving (e.g., 'type' in the example). /// \param IdLoc the location of the identifier. TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, const IdentifierInfo &II, SourceLocation IdLoc); /// Called when the parser has parsed a C++ typename /// specifier that ends in a template-id, e.g., /// "typename MetaFun::template apply<T1, T2>". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param TemplateLoc the location of the 'template' keyword, if any. /// \param TemplateName The template name. /// \param TemplateII The identifier used to name the template. /// \param TemplateIILoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). 
/// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, SourceLocation TemplateLoc, TemplateTy TemplateName, IdentifierInfo *TemplateII, SourceLocation TemplateIILoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc); QualType CheckTypenameType(ElaboratedTypeKeyword Keyword, SourceLocation KeywordLoc, NestedNameSpecifierLoc QualifierLoc, const IdentifierInfo &II, SourceLocation IILoc); TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T, SourceLocation Loc, DeclarationName Name); bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS); ExprResult RebuildExprInCurrentInstantiation(Expr *E); bool RebuildTemplateParamsInCurrentInstantiation( TemplateParameterList *Params); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgumentList &Args); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgument *Args, unsigned NumArgs); // Concepts Decl *ActOnConceptDefinition( Scope *S, MultiTemplateParamsArg TemplateParameterLists, IdentifierInfo *Name, SourceLocation NameLoc, Expr *ConstraintExpr); //===--------------------------------------------------------------------===// // C++ Variadic Templates (C++0x [temp.variadic]) //===--------------------------------------------------------------------===// /// Determine whether an unexpanded parameter pack might be permitted in this /// location. Useful for error recovery. bool isUnexpandedParameterPackPermitted(); /// The context in which an unexpanded parameter pack is /// being diagnosed. /// /// Note that the values of this enumeration line up with the first /// argument to the \c err_unexpanded_parameter_pack diagnostic. enum UnexpandedParameterPackContext { /// An arbitrary expression. UPPC_Expression = 0, /// The base type of a class type. UPPC_BaseType, /// The type of an arbitrary declaration. UPPC_DeclarationType, /// The type of a data member. UPPC_DataMemberType, /// The size of a bit-field. UPPC_BitFieldWidth, /// The expression in a static assertion. UPPC_StaticAssertExpression, /// The fixed underlying type of an enumeration. UPPC_FixedUnderlyingType, /// The enumerator value. UPPC_EnumeratorValue, /// A using declaration. UPPC_UsingDeclaration, /// A friend declaration. UPPC_FriendDeclaration, /// A declaration qualifier. UPPC_DeclarationQualifier, /// An initializer. UPPC_Initializer, /// A default argument. UPPC_DefaultArgument, /// The type of a non-type template parameter. UPPC_NonTypeTemplateParameterType, /// The type of an exception. UPPC_ExceptionType, /// Partial specialization. UPPC_PartialSpecialization, /// Microsoft __if_exists. UPPC_IfExists, /// Microsoft __if_not_exists. UPPC_IfNotExists, /// Lambda expression. UPPC_Lambda, /// Block expression, UPPC_Block }; /// Diagnose unexpanded parameter packs. /// /// \param Loc The location at which we should emit the diagnostic. /// /// \param UPPC The context in which we are diagnosing unexpanded /// parameter packs. /// /// \param Unexpanded the set of unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc, UnexpandedParameterPackContext UPPC, ArrayRef<UnexpandedParameterPack> Unexpanded); /// If the given type contains an unexpanded parameter pack, /// diagnose the error. 
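/// For example:
/// \code
/// template<typename ...Ts> struct X {
///   Ts member; // error: 'Ts' is an unexpanded parameter pack
/// };
/// \endcode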
/// /// \param Loc The source location where a diagnostic should be emitted. /// /// \param T The type that is being checked for unexpanded parameter /// packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T, UnexpandedParameterPackContext UPPC); /// If the given expression contains an unexpanded parameter /// pack, diagnose the error. /// /// \param E The expression that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(Expr *E, UnexpandedParameterPackContext UPPC = UPPC_Expression); /// If the given nested-name-specifier contains an unexpanded /// parameter pack, diagnose the error. /// /// \param SS The nested-name-specifier that is being checked for /// unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS, UnexpandedParameterPackContext UPPC); /// If the given name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param NameInfo The name (with source location information) that /// is being checked for unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo, UnexpandedParameterPackContext UPPC); /// If the given template name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The location of the template name. /// /// \param Template The template name that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TemplateName Template, UnexpandedParameterPackContext UPPC); /// If the given template argument contains an unexpanded parameter /// pack, diagnose the error. /// /// \param Arg The template argument that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg, UnexpandedParameterPackContext UPPC); /// Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgument Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// type. /// /// \param T The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(QualType T, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// type. /// /// \param TL The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TypeLoc TL, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// nested-name-specifier.
/// /// \param NNS The nested-name-specifier that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(NestedNameSpecifierLoc NNS, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// name. /// /// \param NameInfo The name that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Invoked when parsing a template argument followed by an /// ellipsis, which creates a pack expansion. /// /// \param Arg The template argument preceding the ellipsis, which /// may already be invalid. /// /// \param EllipsisLoc The location of the ellipsis. ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg, SourceLocation EllipsisLoc); /// Invoked when parsing a type followed by an ellipsis, which /// creates a pack expansion. /// /// \param Type The type preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc); /// Construct a pack expansion type from the pattern of the pack /// expansion. TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Construct a pack expansion type from the pattern of the pack /// expansion. QualType CheckPackExpansion(QualType Pattern, SourceRange PatternRange, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc); /// Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Determine whether we could expand a pack expansion with the /// given set of parameter packs into separate arguments by repeatedly /// transforming the pattern. /// /// \param EllipsisLoc The location of the ellipsis that identifies the /// pack expansion. /// /// \param PatternRange The source range that covers the entire pattern of /// the pack expansion. /// /// \param Unexpanded The set of unexpanded parameter packs within the /// pattern. /// /// \param ShouldExpand Will be set to \c true if the transformer should /// expand the corresponding pack expansions into separate arguments. When /// set, \c NumExpansions must also be set. /// /// \param RetainExpansion Whether the caller should add an unexpanded /// pack expansion after all of the expanded arguments. This is used /// when extending explicitly-specified template argument packs per /// C++0x [temp.arg.explicit]p9. /// /// \param NumExpansions The number of separate arguments that will be in /// the expanded form of the corresponding pack expansion. 
This is both an /// input and an output parameter, which can be set by the caller if the /// number of expansions is known a priori (e.g., due to a prior substitution) /// and will be set by the callee when the number of expansions is known. /// The callee must set this value when \c ShouldExpand is \c true; it may /// set this value in other cases. /// /// \returns true if an error occurred (e.g., because the parameter packs /// are to be instantiated with arguments of different lengths), false /// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions) /// must be set. bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc, SourceRange PatternRange, ArrayRef<UnexpandedParameterPack> Unexpanded, const MultiLevelTemplateArgumentList &TemplateArgs, bool &ShouldExpand, bool &RetainExpansion, Optional<unsigned> &NumExpansions); /// Determine the number of arguments in the given pack expansion /// type. /// /// This routine assumes that the number of arguments in the expansion is /// consistent across all of the unexpanded parameter packs in its pattern. /// /// Returns an empty Optional if the type can't be expanded. Optional<unsigned> getNumArgumentsInExpansion(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs); /// Determine whether the given declarator contains any unexpanded /// parameter packs. /// /// This routine is used by the parser to disambiguate function declarators /// with an ellipsis prior to the ')', e.g., /// /// \code /// void f(T...); /// \endcode /// /// To determine whether we have an (unnamed) function parameter pack or /// a variadic function. /// /// \returns true if the declarator contains any unexpanded parameter packs, /// false otherwise. bool containsUnexpandedParameterPacks(Declarator &D); /// Returns the pattern of the pack expansion for a template argument. /// /// \param OrigLoc The template argument to expand. /// /// \param Ellipsis Will be set to the location of the ellipsis. /// /// \param NumExpansions Will be set to the number of expansions that will /// be generated from this pack expansion, if known a priori. TemplateArgumentLoc getTemplateArgumentPackExpansionPattern( TemplateArgumentLoc OrigLoc, SourceLocation &Ellipsis, Optional<unsigned> &NumExpansions) const; /// Given a template argument that contains an unexpanded parameter pack, but /// which has already been substituted, attempt to determine the number of /// elements that will be produced once this argument is fully-expanded. /// /// This is intended for use when transforming 'sizeof...(Arg)' in order to /// avoid actually expanding the pack where possible. Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg); //===--------------------------------------------------------------------===// // C++ Template Argument Deduction (C++ [temp.deduct]) //===--------------------------------------------------------------------===// /// Adjust the type \p ArgFunctionType to match the calling convention, /// noreturn, and optionally the exception specification of \p FunctionType. /// Deduction often wants to ignore these properties when matching function /// types. QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType, bool AdjustExceptionSpec = false); /// Describes the result of template argument deduction. /// /// The TemplateDeductionResult enumeration describes the result of /// template argument deduction, as returned from /// DeduceTemplateArguments(). 
The separate TemplateDeductionInfo /// structure provides additional information about the results of /// template argument deduction, e.g., the deduced template argument /// list (if successful) or the specific template parameters or /// deduced arguments that were involved in the failure. enum TemplateDeductionResult { /// Template argument deduction was successful. TDK_Success = 0, /// The declaration was invalid; do nothing. TDK_Invalid, /// Template argument deduction exceeded the maximum template /// instantiation depth (which has already been diagnosed). TDK_InstantiationDepth, /// Template argument deduction did not deduce a value /// for every template parameter. TDK_Incomplete, /// Template argument deduction did not deduce a value for every /// expansion of an expanded template parameter pack. TDK_IncompletePack, /// Template argument deduction produced inconsistent /// deduced values for the given template parameter. TDK_Inconsistent, /// Template argument deduction failed due to inconsistent /// cv-qualifiers on a template parameter type that would /// otherwise be deduced, e.g., we tried to deduce T in "const T" /// but were given a non-const "X". TDK_Underqualified, /// Substitution of the deduced template argument values /// resulted in an error. TDK_SubstitutionFailure, /// After substituting deduced template arguments, a dependent /// parameter type did not match the corresponding argument. TDK_DeducedMismatch, /// After substituting deduced template arguments, an element of /// a dependent parameter type did not match the corresponding element /// of the corresponding argument (when deducing from an initializer list). TDK_DeducedMismatchNested, /// A non-dependent component of the parameter did not match the /// corresponding component of the argument. TDK_NonDeducedMismatch, /// When performing template argument deduction for a function /// template, there were too many call arguments. TDK_TooManyArguments, /// When performing template argument deduction for a function /// template, there were too few call arguments. TDK_TooFewArguments, /// The explicitly-specified template arguments were not valid /// template arguments for the given template. TDK_InvalidExplicitArguments, /// Checking non-dependent argument conversions failed. TDK_NonDependentConversionFailure, /// The deduced arguments did not satisfy the constraints associated /// with the template. TDK_ConstraintsNotSatisfied, /// Deduction failed; that's all we know. TDK_MiscellaneousDeductionFailure, /// CUDA Target attributes do not match. TDK_CUDATargetMismatch }; TemplateDeductionResult DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial, const TemplateArgumentList &TemplateArgs, sema::TemplateDeductionInfo &Info); TemplateDeductionResult DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial, const TemplateArgumentList &TemplateArgs, sema::TemplateDeductionInfo &Info); TemplateDeductionResult SubstituteExplicitTemplateArguments( FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo &ExplicitTemplateArgs, SmallVectorImpl<DeducedTemplateArgument> &Deduced, SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType, sema::TemplateDeductionInfo &Info); /// \brief A function argument from which we performed template argument /// deduction for a call.
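/// For example, in the call below each call argument corresponds, roughly,
/// to an OriginalCallArg, and deduction fails with TDK_Inconsistent because
/// the two arguments deduce different types for \c T:
/// \code
/// template<typename T> void h(T, T);
/// h(1, 2.0); // T deduced as both 'int' and 'double'
/// \endcode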
struct OriginalCallArg { OriginalCallArg(QualType OriginalParamType, bool DecomposedParam, unsigned ArgIdx, QualType OriginalArgType) : OriginalParamType(OriginalParamType), DecomposedParam(DecomposedParam), ArgIdx(ArgIdx), OriginalArgType(OriginalArgType) {} QualType OriginalParamType; bool DecomposedParam; unsigned ArgIdx; QualType OriginalArgType; }; TemplateDeductionResult FinishTemplateArgumentDeduction( FunctionTemplateDecl *FunctionTemplate, SmallVectorImpl<DeducedTemplateArgument> &Deduced, unsigned NumExplicitlySpecified, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr, bool PartialOverloading = false, llvm::function_ref<bool()> CheckNonDependent = []{ return false; }); TemplateDeductionResult DeduceTemplateArguments( FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool PartialOverloading, llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ArgFunctionType, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool IsAddressOfFunction = false); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, QualType ToType, CXXConversionDecl *&Specialization, sema::TemplateDeductionInfo &Info); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool IsAddressOfFunction = false); /// Substitute Replacement for \p auto in \p TypeWithAuto QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement); /// Substitute Replacement for auto in TypeWithAuto TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto, QualType Replacement); /// Completely replace the \c auto in \p TypeWithAuto by /// \p Replacement. This does not retain any \c auto type sugar. QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement); /// Result type of DeduceAutoType. enum DeduceAutoResult { DAR_Succeeded, DAR_Failed, DAR_FailedAlreadyDiagnosed }; DeduceAutoResult DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result, Optional<unsigned> DependentDeductionDepth = None); DeduceAutoResult DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result, Optional<unsigned> DependentDeductionDepth = None); void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init); bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc, bool Diagnose = true); /// Declare implicit deduction guides for a class template if we've /// not already done so. 
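/// For example, class template argument deduction relies on the implicit
/// guides declared here:
/// \code
/// template<typename T> struct Wrap { Wrap(T); };
/// Wrap w(42); // the implicit guide deduces Wrap<int>
/// \endcode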
void DeclareImplicitDeductionGuides(TemplateDecl *Template, SourceLocation Loc); QualType DeduceTemplateSpecializationFromInitializer( TypeSourceInfo *TInfo, const InitializedEntity &Entity, const InitializationKind &Kind, MultiExprArg Init); QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name, QualType Type, TypeSourceInfo *TSI, SourceRange Range, bool DirectInit, Expr *Init); TypeLoc getReturnTypeLoc(FunctionDecl *FD) const; bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD, SourceLocation ReturnLoc, Expr *&RetExpr, AutoType *AT); FunctionTemplateDecl *getMoreSpecializedTemplate(FunctionTemplateDecl *FT1, FunctionTemplateDecl *FT2, SourceLocation Loc, TemplatePartialOrderingContext TPOC, unsigned NumCallArguments1, unsigned NumCallArguments2); UnresolvedSetIterator getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd, TemplateSpecCandidateSet &FailedCandidates, SourceLocation Loc, const PartialDiagnostic &NoneDiag, const PartialDiagnostic &AmbigDiag, const PartialDiagnostic &CandidateDiag, bool Complain = true, QualType TargetType = QualType()); ClassTemplatePartialSpecializationDecl * getMoreSpecializedPartialSpecialization( ClassTemplatePartialSpecializationDecl *PS1, ClassTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T, sema::TemplateDeductionInfo &Info); VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization( VarTemplatePartialSpecializationDecl *PS1, VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T, sema::TemplateDeductionInfo &Info); bool isTemplateTemplateParameterAtLeastAsSpecializedAs( TemplateParameterList *P, TemplateDecl *AArg, SourceLocation Loc); void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs, bool OnlyDeduced, unsigned Depth, llvm::SmallBitVector &Used); void MarkDeducedTemplateParameters( const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced) { return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced); } static void MarkDeducedTemplateParameters(ASTContext &Ctx, const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced); //===--------------------------------------------------------------------===// // C++ Template Instantiation // MultiLevelTemplateArgumentList getTemplateInstantiationArgs(NamedDecl *D, const TemplateArgumentList *Innermost = nullptr, bool RelativeToPrimary = false, const FunctionDecl *Pattern = nullptr); /// A context in which code is being synthesized (where a source location /// alone is not sufficient to identify the context). This covers template /// instantiation and various forms of implicitly-generated functions. struct CodeSynthesisContext { /// The kind of template instantiation we are performing enum SynthesisKind { /// We are instantiating a template declaration. The entity is /// the declaration we're instantiating (e.g., a CXXRecordDecl). TemplateInstantiation, /// We are instantiating a default argument for a template /// parameter. The Entity is the template parameter whose argument is /// being instantiated, the Template is the template, and the /// TemplateArgs/NumTemplateArguments provide the template arguments as /// specified. DefaultTemplateArgumentInstantiation, /// We are instantiating a default argument for a function. 
/// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs /// provides the template arguments as specified. DefaultFunctionArgumentInstantiation, /// We are substituting explicit template arguments provided for /// a function template. The entity is a FunctionTemplateDecl. ExplicitTemplateArgumentSubstitution, /// We are substituting template arguments determined as part of /// template argument deduction for either a class template /// partial specialization or a function template. The /// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or /// a TemplateDecl. DeducedTemplateArgumentSubstitution, /// We are substituting prior template arguments into a new /// template parameter. The template parameter itself is either a /// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl. PriorTemplateArgumentSubstitution, /// We are checking the validity of a default template argument that /// has been used when naming a template-id. DefaultTemplateArgumentChecking, /// We are computing the exception specification for a defaulted special /// member function. ExceptionSpecEvaluation, /// We are instantiating the exception specification for a function /// template which was deferred until it was needed. ExceptionSpecInstantiation, /// We are declaring an implicit special member function. DeclaringSpecialMember, /// We are declaring an implicit 'operator==' for a defaulted /// 'operator<=>'. DeclaringImplicitEqualityComparison, /// We are defining a synthesized function (such as a defaulted special /// member). DefiningSynthesizedFunction, // We are checking the constraints associated with a constrained entity or // the constraint expression of a concept. This includes the checks that // atomic constraints have the type 'bool' and that they can be constant // evaluated. ConstraintsCheck, // We are substituting template arguments into a constraint expression. ConstraintSubstitution, /// We are rewriting a comparison operator in terms of an operator<=>. RewritingOperatorAsSpaceship, /// Added for template instantiation observation. /// Memoization means we are _not_ instantiating a template because /// it is already instantiated (but we entered a context where we /// would have had to if it was not already instantiated). Memoization } Kind; /// Was the enclosing context a non-instantiation SFINAE context? bool SavedInNonInstantiationSFINAEContext; /// The point of instantiation or synthesis within the source code. SourceLocation PointOfInstantiation; /// The entity that is being synthesized. Decl *Entity; /// The template (or partial specialization) in which we are /// performing the instantiation, for substitutions of prior template /// arguments. NamedDecl *Template; /// The list of template arguments we are substituting, if they /// are not part of the entity. const TemplateArgument *TemplateArgs; // FIXME: Wrap this union around more members, or perhaps store the // kind-specific members in the RAII object owning the context. union { /// The number of template arguments in TemplateArgs. unsigned NumTemplateArgs; /// The special member being declared or defined. CXXSpecialMember SpecialMember; }; ArrayRef<TemplateArgument> template_arguments() const { assert(Kind != DeclaringSpecialMember); return {TemplateArgs, NumTemplateArgs}; } /// The template deduction info object associated with the /// substitution or checking of explicit or deduced template arguments.
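/// For example, a call such as the following broadly enters an
/// ExplicitTemplateArgumentSubstitution context while substituting T = int,
/// and then a DeducedTemplateArgumentSubstitution context for the remaining
/// parameter:
/// \code
/// template<typename T, typename U> void k(U);
/// k<int>(0); // T given explicitly, U deduced as 'int'
/// \endcode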
sema::TemplateDeductionInfo *DeductionInfo; /// The source range that covers the construct that causes /// the instantiation, e.g., the template-id that causes a class /// template instantiation. SourceRange InstantiationRange; CodeSynthesisContext() : Kind(TemplateInstantiation), SavedInNonInstantiationSFINAEContext(false), Entity(nullptr), Template(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0), DeductionInfo(nullptr) {} /// Determines whether this template is an actual instantiation /// that should be counted toward the maximum instantiation depth. bool isInstantiationRecord() const; }; /// List of active code synthesis contexts. /// /// This vector is treated as a stack. As synthesis of one entity requires /// synthesis of another, additional contexts are pushed onto the stack. SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts; /// Specializations whose definitions are currently being instantiated. llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations; /// Non-dependent types used in templates that have already been instantiated /// by some template instantiation. llvm::DenseSet<QualType> InstantiatedNonDependentTypes; /// Extra modules inspected when performing a lookup during a template /// instantiation. Computed lazily. SmallVector<Module*, 16> CodeSynthesisContextLookupModules; /// Cache of additional modules that should be used for name lookup /// within the current template instantiation. Computed lazily; use /// getLookupModules() to get a complete set. llvm::DenseSet<Module*> LookupModulesCache; /// Get the set of additional modules that should be checked during /// name lookup. A module and its imports become visible when instantiating a /// template defined within it. llvm::DenseSet<Module*> &getLookupModules(); /// Map from the most recent declaration of a namespace to the most /// recent visible declaration of that namespace. llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache; /// Whether we are in a SFINAE context that is not associated with /// template instantiation. /// /// This is used when setting up a SFINAE trap (see \c SFINAETrap) outside /// of a template instantiation or template argument deduction. bool InNonInstantiationSFINAEContext; /// The number of \p CodeSynthesisContexts that are not template /// instantiations and, therefore, should not be counted as part of the /// instantiation depth. /// /// When the instantiation depth reaches the user-configurable limit /// \p LangOptions::InstantiationDepth we will abort instantiation. // FIXME: Should we have a similar limit for other forms of synthesis? unsigned NonInstantiationEntries; /// The depth of the context stack at the point when the most recent /// error or warning was produced. /// /// This value is used to suppress printing of redundant context stacks /// when there are multiple errors or warnings in the same instantiation. // FIXME: Does this belong in Sema? It's tough to implement it anywhere else. unsigned LastEmittedCodeSynthesisContextDepth = 0; /// The template instantiation callbacks to trace or track /// instantiations (objects can be chained). /// /// These callbacks are used to print, trace or track template /// instantiations as they are being constructed. std::vector<std::unique_ptr<TemplateInstantiationCallback>> TemplateInstCallbacks; /// The current index into pack expansion arguments that will be /// used for substitution of parameter packs.
/// /// The pack expansion index will be -1 to indicate that parameter packs /// should be instantiated as themselves. Otherwise, the index specifies /// which argument within the parameter pack will be used for substitution. int ArgumentPackSubstitutionIndex; /// RAII object used to change the argument pack substitution index /// within a \c Sema object. /// /// See \c ArgumentPackSubstitutionIndex for more information. class ArgumentPackSubstitutionIndexRAII { Sema &Self; int OldSubstitutionIndex; public: ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex) : Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) { Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex; } ~ArgumentPackSubstitutionIndexRAII() { Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex; } }; friend class ArgumentPackSubstitutionRAII; /// For each declaration that involved template argument deduction, the /// set of diagnostics that were suppressed during that template argument /// deduction. /// /// FIXME: Serialize this structure to the AST file. typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> > SuppressedDiagnosticsMap; SuppressedDiagnosticsMap SuppressedDiagnostics; /// A stack object to be created when performing template /// instantiation. /// /// Construction of an object of type \c InstantiatingTemplate /// pushes the current instantiation onto the stack of active /// instantiations. If the size of this stack exceeds the maximum /// number of recursive template instantiations, construction /// produces an error and evaluates true. /// /// Destruction of this object will pop the named instantiation off /// the stack. struct InstantiatingTemplate { /// Note that we are instantiating a class template, /// function template, variable template, alias template, /// or a member thereof. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, Decl *Entity, SourceRange InstantiationRange = SourceRange()); struct ExceptionSpecification {}; /// Note that we are instantiating an exception specification /// of a function template. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionDecl *Entity, ExceptionSpecification, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating a default argument in a /// template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateParameter Param, TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange = SourceRange()); /// Note that we are substituting either explicitly-specified or /// deduced template arguments during function template argument deduction. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionTemplateDecl *FunctionTemplate, ArrayRef<TemplateArgument> TemplateArgs, CodeSynthesisContext::SynthesisKind Kind, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a class template declaration. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a class template partial /// specialization. 
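/// For example, selecting the partial specialization below requires deducing
/// \c T from the specialization's own argument list:
/// \code
/// template<typename T> struct A {};       // primary template
/// template<typename T> struct A<T *> {};  // partial specialization
/// A<int *> a; // matches the partial specialization with T = int
/// \endcode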
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ClassTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a variable template partial /// specialization. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, VarTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating a default argument for a function /// parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ParmVarDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange = SourceRange()); /// Note that we are substituting prior template arguments into a /// non-type parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, NonTypeTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// Note that we are substituting prior template arguments into a /// template template parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, TemplateTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// Note that we are checking the default template argument /// against the template parameter for a given template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, NamedDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); struct ConstraintsCheck {}; /// \brief Note that we are checking the constraints associated with some /// constrained entity (a concept declaration or a template with associated /// constraints). InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ConstraintsCheck, NamedDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); struct ConstraintSubstitution {}; /// \brief Note that we are checking a constraint expression associated /// with a template declaration or as part of the satisfaction check of a /// concept. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ConstraintSubstitution, NamedDecl *Template, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange); /// Note that we have finished instantiating this template. void Clear(); ~InstantiatingTemplate() { Clear(); } /// Determines whether we have exceeded the maximum /// recursive template instantiations. bool isInvalid() const { return Invalid; } /// Determine whether we are already instantiating this /// specialization in some surrounding active instantiation. 
  bool isAlreadyInstantiating() const { return AlreadyInstantiating; }

private:
  Sema &SemaRef;
  bool Invalid;
  bool AlreadyInstantiating;
  bool CheckInstantiationDepth(SourceLocation PointOfInstantiation,
                               SourceRange InstantiationRange);

  InstantiatingTemplate(
      Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind,
      SourceLocation PointOfInstantiation, SourceRange InstantiationRange,
      Decl *Entity, NamedDecl *Template = nullptr,
      ArrayRef<TemplateArgument> TemplateArgs = None,
      sema::TemplateDeductionInfo *DeductionInfo = nullptr);

  InstantiatingTemplate(const InstantiatingTemplate&) = delete;
  InstantiatingTemplate& operator=(const InstantiatingTemplate&) = delete;
};

void pushCodeSynthesisContext(CodeSynthesisContext Ctx);
void popCodeSynthesisContext();

/// Determine whether we are currently performing template instantiation.
bool inTemplateInstantiation() const {
  return CodeSynthesisContexts.size() > NonInstantiationEntries;
}

void PrintContextStack() {
  if (!CodeSynthesisContexts.empty() &&
      CodeSynthesisContexts.size() != LastEmittedCodeSynthesisContextDepth) {
    PrintInstantiationStack();
    LastEmittedCodeSynthesisContextDepth = CodeSynthesisContexts.size();
  }
  if (PragmaAttributeCurrentTargetDecl)
    PrintPragmaAttributeInstantiationPoint();
}
void PrintInstantiationStack();

void PrintPragmaAttributeInstantiationPoint();

/// Determines whether we are currently in a context where
/// template argument substitution failures are not considered
/// errors.
///
/// \returns An empty \c Optional if we're not in a SFINAE context.
/// Otherwise, contains a pointer that, if non-NULL, points to the nearest
/// template-deduction context object, which can be used to capture
/// diagnostics that will be suppressed.
Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const;

/// Determines whether we are currently in a context that
/// is not evaluated as per C++ [expr]p5.
bool isUnevaluatedContext() const {
  assert(!ExprEvalContexts.empty() &&
         "Must be in an expression evaluation context");
  return ExprEvalContexts.back().isUnevaluated();
}

/// RAII class used to determine whether SFINAE has
/// trapped any errors that occur during template argument
/// deduction.
class SFINAETrap {
  Sema &SemaRef;
  unsigned PrevSFINAEErrors;
  bool PrevInNonInstantiationSFINAEContext;
  bool PrevAccessCheckingSFINAE;
  bool PrevLastDiagnosticIgnored;

public:
  explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false)
      : SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors),
        PrevInNonInstantiationSFINAEContext(
            SemaRef.InNonInstantiationSFINAEContext),
        PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE),
        PrevLastDiagnosticIgnored(
            SemaRef.getDiagnostics().isLastDiagnosticIgnored()) {
    if (!SemaRef.isSFINAEContext())
      SemaRef.InNonInstantiationSFINAEContext = true;
    SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE;
  }

  ~SFINAETrap() {
    SemaRef.NumSFINAEErrors = PrevSFINAEErrors;
    SemaRef.InNonInstantiationSFINAEContext =
        PrevInNonInstantiationSFINAEContext;
    SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE;
    SemaRef.getDiagnostics().setLastDiagnosticIgnored(
        PrevLastDiagnosticIgnored);
  }

  /// Determine whether any SFINAE errors have been trapped.
  bool hasErrorOccurred() const {
    return SemaRef.NumSFINAEErrors > PrevSFINAEErrors;
  }
};

/// RAII class used to indicate that we are performing provisional
/// semantic analysis to determine the validity of a construct, so
/// typo-correction and diagnostics in the immediate context (not within
/// implicitly-instantiated templates) should be suppressed.
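///
/// A minimal sketch of its intended use (surrounding code hypothetical):
/// \code
///   {
///     TentativeAnalysisScope Tentative(SemaRef);
///     // provisionally analyze the construct; typo-correction and
///     // immediate-context diagnostics are suppressed until destruction
///   } // previous state restored here
/// \endcode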
class TentativeAnalysisScope {
  Sema &SemaRef;
  // FIXME: Using a SFINAETrap for this is a hack.
  SFINAETrap Trap;
  bool PrevDisableTypoCorrection;

public:
  explicit TentativeAnalysisScope(Sema &SemaRef)
      : SemaRef(SemaRef), Trap(SemaRef, true),
        PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) {
    SemaRef.DisableTypoCorrection = true;
  }
  ~TentativeAnalysisScope() {
    SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection;
  }
};

/// The current instantiation scope used to store local
/// variables.
LocalInstantiationScope *CurrentInstantiationScope;

/// Tracks whether we are in a context where typo correction is
/// disabled.
bool DisableTypoCorrection;

/// The number of typos corrected by CorrectTypo.
unsigned TyposCorrected;

typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet;
typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations;

/// A cache containing identifiers for which typo correction failed and
/// their locations, so that repeated attempts to correct an identifier in a
/// given location are ignored if typo correction already failed for it.
IdentifierSourceLocations TypoCorrectionFailures;

/// Worker object for performing CFG-based warnings.
sema::AnalysisBasedWarnings AnalysisWarnings;
threadSafety::BeforeSet *ThreadSafetyDeclCache;

/// An entity for which implicit template instantiation is required.
///
/// The source location associated with the declaration is the first place in
/// the source code where the declaration was "used". It is not necessarily
/// the point of instantiation (which will be either before or after the
/// namespace-scope declaration that triggered this implicit instantiation).
/// However, it is the location that diagnostics should generally refer to,
/// because users will need to know what code triggered the instantiation.
typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation;

/// The queue of implicit template instantiations that are required
/// but have not yet been performed.
std::deque<PendingImplicitInstantiation> PendingInstantiations;

/// Queue of implicit template instantiations that cannot be performed
/// eagerly.
SmallVector<PendingImplicitInstantiation, 1> LateParsedInstantiations;

class GlobalEagerInstantiationScope {
public:
  GlobalEagerInstantiationScope(Sema &S, bool Enabled)
      : S(S), Enabled(Enabled) {
    if (!Enabled) return;

    SavedPendingInstantiations.swap(S.PendingInstantiations);
    SavedVTableUses.swap(S.VTableUses);
  }

  void perform() {
    if (Enabled) {
      S.DefineUsedVTables();
      S.PerformPendingInstantiations();
    }
  }

  ~GlobalEagerInstantiationScope() {
    if (!Enabled) return;

    // Restore the set of pending vtables.
    assert(S.VTableUses.empty() &&
           "VTableUses should be empty before it is discarded.");
    S.VTableUses.swap(SavedVTableUses);

    // Restore the set of pending implicit instantiations.
    assert(S.PendingInstantiations.empty() &&
           "PendingInstantiations should be empty before it is discarded.");
    S.PendingInstantiations.swap(SavedPendingInstantiations);
  }

private:
  Sema &S;
  SmallVector<VTableUse, 16> SavedVTableUses;
  std::deque<PendingImplicitInstantiation> SavedPendingInstantiations;
  bool Enabled;
};

/// The queue of implicit template instantiations that are required
/// and must be performed within the current local scope.
///
/// This queue is only used for member functions of local classes in
/// templates, which must be instantiated in the same scope as their
/// enclosing function, so that they can reference function-local
/// types, static variables, enumerators, etc.
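///
/// A sketch of how this queue is typically driven (caller code hypothetical;
/// see LocalEagerInstantiationScope below):
/// \code
///   LocalEagerInstantiationScope Scope(SemaRef);
///   // ... instantiate the local class and enqueue its member functions ...
///   Scope.perform(); // flushes PendingLocalImplicitInstantiations
/// \endcode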
std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations;

class LocalEagerInstantiationScope {
public:
  LocalEagerInstantiationScope(Sema &S) : S(S) {
    SavedPendingLocalImplicitInstantiations.swap(
        S.PendingLocalImplicitInstantiations);
  }

  void perform() { S.PerformPendingInstantiations(/*LocalOnly=*/true); }

  ~LocalEagerInstantiationScope() {
    assert(S.PendingLocalImplicitInstantiations.empty() &&
           "there shouldn't be any pending local implicit instantiations");
    SavedPendingLocalImplicitInstantiations.swap(
        S.PendingLocalImplicitInstantiations);
  }

private:
  Sema &S;
  std::deque<PendingImplicitInstantiation>
      SavedPendingLocalImplicitInstantiations;
};

/// A helper class for building up ExtParameterInfos.
class ExtParameterInfoBuilder {
  SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos;
  bool HasInteresting = false;

public:
  /// Set the ExtParameterInfo for the parameter at the given index.
  void set(unsigned index, FunctionProtoType::ExtParameterInfo info) {
    assert(Infos.size() <= index);
    Infos.resize(index);
    Infos.push_back(info);

    if (!HasInteresting)
      HasInteresting = (info != FunctionProtoType::ExtParameterInfo());
  }

  /// Return a pointer (suitable for setting in an ExtProtoInfo) to the
  /// ExtParameterInfo array we've built up.
  const FunctionProtoType::ExtParameterInfo *
  getPointerOrNull(unsigned numParams) {
    if (!HasInteresting) return nullptr;
    Infos.resize(numParams);
    return Infos.data();
  }
};

void PerformPendingInstantiations(bool LocalOnly = false);

TypeSourceInfo *SubstType(TypeSourceInfo *T,
                          const MultiLevelTemplateArgumentList &TemplateArgs,
                          SourceLocation Loc, DeclarationName Entity,
                          bool AllowDeducedTST = false);

QualType SubstType(QualType T,
                   const MultiLevelTemplateArgumentList &TemplateArgs,
                   SourceLocation Loc, DeclarationName Entity);

TypeSourceInfo *SubstType(TypeLoc TL,
                          const MultiLevelTemplateArgumentList &TemplateArgs,
                          SourceLocation Loc, DeclarationName Entity);

TypeSourceInfo *SubstFunctionDeclType(
    TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs,
    SourceLocation Loc, DeclarationName Entity, CXXRecordDecl *ThisContext,
    Qualifiers ThisTypeQuals);
void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto,
                        const MultiLevelTemplateArgumentList &Args);
bool SubstExceptionSpec(SourceLocation Loc,
                        FunctionProtoType::ExceptionSpecInfo &ESI,
                        SmallVectorImpl<QualType> &ExceptionStorage,
                        const MultiLevelTemplateArgumentList &Args);
ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D,
                              const MultiLevelTemplateArgumentList &TemplateArgs,
                              int indexAdjustment,
                              Optional<unsigned> NumExpansions,
                              bool ExpectParameterPack);
bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params,
                    const FunctionProtoType::ExtParameterInfo *ExtParamInfos,
                    const MultiLevelTemplateArgumentList &TemplateArgs,
                    SmallVectorImpl<QualType> &ParamTypes,
                    SmallVectorImpl<ParmVarDecl *> *OutParams,
                    ExtParameterInfoBuilder &ParamInfos);
ExprResult SubstExpr(Expr *E,
                     const MultiLevelTemplateArgumentList &TemplateArgs);

/// Substitute the given template arguments into a list of
/// expressions, expanding pack expansions if required.
///
/// \param Exprs The list of expressions to substitute into.
///
/// \param IsCall Whether this is some form of call, in which case
/// default arguments will be dropped.
///
/// \param TemplateArgs The set of template arguments to substitute.
///
/// \param Outputs Will receive all of the substituted arguments.
///
/// \returns true if an error occurred, false otherwise.
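///
/// A minimal sketch of the call pattern (surrounding names hypothetical):
/// \code
///   SmallVector<Expr *, 4> SubstitutedArgs;
///   if (SemaRef.SubstExprs(CallArgs, /*IsCall=*/true, TemplateArgs,
///                          SubstitutedArgs))
///     return ExprError();
/// \endcode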
bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<Expr *> &Outputs); StmtResult SubstStmt(Stmt *S, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateParameterList * SubstTemplateParams(TemplateParameterList *Params, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); Decl *SubstDecl(Decl *D, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); /// Substitute the name and return type of a defaulted 'operator<=>' to form /// an implicit 'operator=='. FunctionDecl *SubstSpaceshipAsEqualEqual(CXXRecordDecl *RD, FunctionDecl *Spaceship); ExprResult SubstInitializer(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs, bool CXXDirectInit); bool SubstBaseSpecifiers(CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); bool InstantiateClass(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK, bool Complain = true); bool InstantiateEnum(SourceLocation PointOfInstantiation, EnumDecl *Instantiation, EnumDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); bool InstantiateInClassInitializer( SourceLocation PointOfInstantiation, FieldDecl *Instantiation, FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); struct LateInstantiatedAttribute { const Attr *TmplAttr; LocalInstantiationScope *Scope; Decl *NewDecl; LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S, Decl *D) : TmplAttr(A), Scope(S), NewDecl(D) { } }; typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec; void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); void InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); bool usesPartialOrExplicitSpecialization( SourceLocation Loc, ClassTemplateSpecializationDecl *ClassTemplateSpec); bool InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK, bool Complain = true); void InstantiateClassMembers(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); void InstantiateClassTemplateSpecializationMembers( SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK); NestedNameSpecifierLoc SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS, const MultiLevelTemplateArgumentList &TemplateArgs); DeclarationNameInfo SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateName SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name, SourceLocation Loc, const MultiLevelTemplateArgumentList &TemplateArgs); bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs, TemplateArgumentListInfo &Result, const MultiLevelTemplateArgumentList &TemplateArgs); void InstantiateExceptionSpec(SourceLocation PointOfInstantiation, FunctionDecl *Function); FunctionDecl 
*InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD, const TemplateArgumentList *Args, SourceLocation Loc); void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation, FunctionDecl *Function, bool Recursive = false, bool DefinitionRequired = false, bool AtEndOfTU = false); VarTemplateSpecializationDecl *BuildVarTemplateInstantiation( VarTemplateDecl *VarTemplate, VarDecl *FromVar, const TemplateArgumentList &TemplateArgList, const TemplateArgumentListInfo &TemplateArgsInfo, SmallVectorImpl<TemplateArgument> &Converted, SourceLocation PointOfInstantiation, void *InsertPos, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *StartingScope = nullptr); VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl( VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl, const MultiLevelTemplateArgumentList &TemplateArgs); void BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs, LateInstantiatedAttrVec *LateAttrs, DeclContext *Owner, LocalInstantiationScope *StartingScope, bool InstantiatingVarTemplate = false, VarTemplateSpecializationDecl *PrevVTSD = nullptr); VarDecl *getVarTemplateSpecialization( VarTemplateDecl *VarTempl, const TemplateArgumentListInfo *TemplateArgs, const DeclarationNameInfo &MemberNameInfo, SourceLocation TemplateKWLoc); void InstantiateVariableInitializer( VarDecl *Var, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs); void InstantiateVariableDefinition(SourceLocation PointOfInstantiation, VarDecl *Var, bool Recursive = false, bool DefinitionRequired = false, bool AtEndOfTU = false); void InstantiateMemInitializers(CXXConstructorDecl *New, const CXXConstructorDecl *Tmpl, const MultiLevelTemplateArgumentList &TemplateArgs); NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs, bool FindingInstantiatedContext = false); DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC, const MultiLevelTemplateArgumentList &TemplateArgs); // Objective-C declarations. 
enum ObjCContainerKind {
  OCK_None = -1,
  OCK_Interface = 0,
  OCK_Protocol,
  OCK_Category,
  OCK_ClassExtension,
  OCK_Implementation,
  OCK_CategoryImplementation
};
ObjCContainerKind getObjCContainerKind() const;

DeclResult actOnObjCTypeParam(Scope *S,
                              ObjCTypeParamVariance variance,
                              SourceLocation varianceLoc,
                              unsigned index,
                              IdentifierInfo *paramName,
                              SourceLocation paramLoc,
                              SourceLocation colonLoc,
                              ParsedType typeBound);

ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc,
                                          ArrayRef<Decl *> typeParams,
                                          SourceLocation rAngleLoc);

void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList);

Decl *ActOnStartClassInterface(
    Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
    SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
    IdentifierInfo *SuperName, SourceLocation SuperLoc,
    ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange,
    Decl *const *ProtoRefs, unsigned NumProtoRefs,
    const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
    const ParsedAttributesView &AttrList);

void ActOnSuperClassOfClassInterface(Scope *S,
                                     SourceLocation AtInterfaceLoc,
                                     ObjCInterfaceDecl *IDecl,
                                     IdentifierInfo *ClassName,
                                     SourceLocation ClassLoc,
                                     IdentifierInfo *SuperName,
                                     SourceLocation SuperLoc,
                                     ArrayRef<ParsedType> SuperTypeArgs,
                                     SourceRange SuperTypeArgsRange);

void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs,
                             SmallVectorImpl<SourceLocation> &ProtocolLocs,
                             IdentifierInfo *SuperName,
                             SourceLocation SuperLoc);

Decl *ActOnCompatibilityAlias(
    SourceLocation AtCompatibilityAliasLoc, IdentifierInfo *AliasName,
    SourceLocation AliasLocation, IdentifierInfo *ClassName,
    SourceLocation ClassLocation);

bool CheckForwardProtocolDeclarationForCircularDependency(
    IdentifierInfo *PName, SourceLocation &PLoc, SourceLocation PrevLoc,
    const ObjCList<ObjCProtocolDecl> &PList);

Decl *ActOnStartProtocolInterface(
    SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName,
    SourceLocation ProtocolLoc, Decl *const *ProtoRefNames,
    unsigned NumProtoRefs, const SourceLocation *ProtoLocs,
    SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList);

Decl *ActOnStartCategoryInterface(
    SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
    SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
    IdentifierInfo *CategoryName, SourceLocation CategoryLoc,
    Decl *const *ProtoRefs, unsigned NumProtoRefs,
    const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
    const ParsedAttributesView &AttrList);

Decl *ActOnStartClassImplementation(SourceLocation AtClassImplLoc,
                                    IdentifierInfo *ClassName,
                                    SourceLocation ClassLoc,
                                    IdentifierInfo *SuperClassname,
                                    SourceLocation SuperClassLoc,
                                    const ParsedAttributesView &AttrList);

Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc,
                                       IdentifierInfo *ClassName,
                                       SourceLocation ClassLoc,
                                       IdentifierInfo *CatName,
                                       SourceLocation CatLoc,
                                       const ParsedAttributesView &AttrList);

DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl,
                                             ArrayRef<Decl *> Decls);

DeclGroupPtrTy ActOnForwardClassDeclaration(
    SourceLocation Loc, IdentifierInfo **IdentList, SourceLocation *IdentLocs,
    ArrayRef<ObjCTypeParamList *> TypeParamLists, unsigned NumElts);

DeclGroupPtrTy
ActOnForwardProtocolDeclaration(SourceLocation AtProtocolLoc,
                                ArrayRef<IdentifierLocPair> IdentList,
                                const ParsedAttributesView &attrList);

void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer,
                             ArrayRef<IdentifierLocPair> ProtocolId,
                             SmallVectorImpl<Decl *> &Protocols);

void DiagnoseTypeArgsAndProtocols(IdentifierInfo *ProtocolId,
                                  SourceLocation ProtocolLoc,
                                  IdentifierInfo *TypeArgId,
                                  SourceLocation TypeArgLoc,
                                  bool SelectProtocolFirst = false);

/// Given a list of identifiers (and their locations), resolve the
/// names to either Objective-C protocol qualifiers or type
/// arguments, as appropriate.
void actOnObjCTypeArgsOrProtocolQualifiers(
    Scope *S, ParsedType baseType, SourceLocation lAngleLoc,
    ArrayRef<IdentifierInfo *> identifiers,
    ArrayRef<SourceLocation> identifierLocs, SourceLocation rAngleLoc,
    SourceLocation &typeArgsLAngleLoc, SmallVectorImpl<ParsedType> &typeArgs,
    SourceLocation &typeArgsRAngleLoc, SourceLocation &protocolLAngleLoc,
    SmallVectorImpl<Decl *> &protocols, SourceLocation &protocolRAngleLoc,
    bool warnOnIncompleteProtocols);

/// Build an Objective-C protocol-qualified 'id' type where no
/// base type was specified.
TypeResult actOnObjCProtocolQualifierType(
    SourceLocation lAngleLoc, ArrayRef<Decl *> protocols,
    ArrayRef<SourceLocation> protocolLocs, SourceLocation rAngleLoc);

/// Build a specialized and/or protocol-qualified Objective-C type.
TypeResult actOnObjCTypeArgsAndProtocolQualifiers(
    Scope *S, SourceLocation Loc, ParsedType BaseType,
    SourceLocation TypeArgsLAngleLoc, ArrayRef<ParsedType> TypeArgs,
    SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc,
    ArrayRef<Decl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs,
    SourceLocation ProtocolRAngleLoc);

/// Build an Objective-C type parameter type.
QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl,
                                SourceLocation ProtocolLAngleLoc,
                                ArrayRef<ObjCProtocolDecl *> Protocols,
                                ArrayRef<SourceLocation> ProtocolLocs,
                                SourceLocation ProtocolRAngleLoc,
                                bool FailOnError = false);

/// Build an Objective-C object pointer type.
QualType BuildObjCObjectType(QualType BaseType, SourceLocation Loc,
                             SourceLocation TypeArgsLAngleLoc,
                             ArrayRef<TypeSourceInfo *> TypeArgs,
                             SourceLocation TypeArgsRAngleLoc,
                             SourceLocation ProtocolLAngleLoc,
                             ArrayRef<ObjCProtocolDecl *> Protocols,
                             ArrayRef<SourceLocation> ProtocolLocs,
                             SourceLocation ProtocolRAngleLoc,
                             bool FailOnError = false);

/// Ensure attributes are consistent with type.
/// \param [in, out] Attributes The attributes to check; they will
/// be modified to be consistent with \p PropertyPtrTy.
void CheckObjCPropertyAttributes(Decl *PropertyPtrTy,
                                 SourceLocation Loc,
                                 unsigned &Attributes,
                                 bool propertyInPrimaryClass);

/// Process the specified property declaration and create decls for the
/// setters and getters as needed.
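/// For illustration, a property such as (hypothetical snippet)
/// \code
/// @property (nonatomic, copy) NSString *name;
/// \endcode
/// yields implicit -name and -setName: method declarations when they are
/// not explicitly declared.
///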
/// \param property The property declaration being processed void ProcessPropertyDecl(ObjCPropertyDecl *property); void DiagnosePropertyMismatch(ObjCPropertyDecl *Property, ObjCPropertyDecl *SuperProperty, const IdentifierInfo *Name, bool OverridingProtocolProperty); void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT, ObjCInterfaceDecl *ID); Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd, ArrayRef<Decl *> allMethods = None, ArrayRef<DeclGroupPtrTy> allTUVars = None); Decl *ActOnProperty(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, ObjCDeclSpec &ODS, Selector GetterSel, Selector SetterSel, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC = nullptr); Decl *ActOnPropertyImplDecl(Scope *S, SourceLocation AtLoc, SourceLocation PropertyLoc, bool ImplKind, IdentifierInfo *PropertyId, IdentifierInfo *PropertyIvar, SourceLocation PropertyIvarLoc, ObjCPropertyQueryKind QueryKind); enum ObjCSpecialMethodKind { OSMK_None, OSMK_Alloc, OSMK_New, OSMK_Copy, OSMK_RetainingInit, OSMK_NonRetainingInit }; struct ObjCArgInfo { IdentifierInfo *Name; SourceLocation NameLoc; // The Type is null if no type was specified, and the DeclSpec is invalid // in this case. ParsedType Type; ObjCDeclSpec DeclSpec; /// ArgAttrs - Attribute list for this argument. ParsedAttributesView ArgAttrs; }; Decl *ActOnMethodDeclaration( Scope *S, SourceLocation BeginLoc, // location of the + or -. SourceLocation EndLoc, // location of the ; or {. tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType, ArrayRef<SourceLocation> SelectorLocs, Selector Sel, // optional arguments. The number of types/arguments is obtained // from the Sel.getNumArgs(). ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo, unsigned CNumArgs, // c-style args const ParsedAttributesView &AttrList, tok::ObjCKeywordKind MethodImplKind, bool isVariadic, bool MethodDefinition); ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel, const ObjCObjectPointerType *OPT, bool IsInstance); ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty, bool IsInstance); bool CheckARCMethodDecl(ObjCMethodDecl *method); bool inferObjCARCLifetime(ValueDecl *decl); void deduceOpenCLAddressSpace(ValueDecl *decl); ExprResult HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT, Expr *BaseExpr, SourceLocation OpLoc, DeclarationName MemberName, SourceLocation MemberLoc, SourceLocation SuperLoc, QualType SuperType, bool Super); ExprResult ActOnClassPropertyRefExpr(IdentifierInfo &receiverName, IdentifierInfo &propertyName, SourceLocation receiverNameLoc, SourceLocation propertyNameLoc); ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc); /// Describes the kind of message expression indicated by a message /// send that starts with an identifier. enum ObjCMessageKind { /// The message is sent to 'super'. ObjCSuperMessage, /// The message is an instance message. ObjCInstanceMessage, /// The message is a class message, and the identifier is a type /// name. 
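/// (For example, the receiver in '[NSString alloc]' names a class.)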
ObjCClassMessage }; ObjCMessageKind getObjCMessageKind(Scope *S, IdentifierInfo *Name, SourceLocation NameLoc, bool IsSuper, bool HasTrailingDot, ParsedType &ReceiverType); ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildClassMessageImplicit(QualType ReceiverType, bool isSuperReceiver, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnClassMessage(Scope *S, ParsedType Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildInstanceMessage(Expr *Receiver, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildInstanceMessageImplicit(Expr *Receiver, QualType ReceiverType, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnInstanceMessage(Scope *S, Expr *Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, TypeSourceInfo *TSInfo, Expr *SubExpr); ExprResult ActOnObjCBridgedCast(Scope *S, SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, ParsedType Type, SourceLocation RParenLoc, Expr *SubExpr); void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr); void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr); bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr, CastKind &Kind); bool checkObjCBridgeRelatedComponents(SourceLocation Loc, QualType DestType, QualType SrcType, ObjCInterfaceDecl *&RelatedClass, ObjCMethodDecl *&ClassMethod, ObjCMethodDecl *&InstanceMethod, TypedefNameDecl *&TDNDecl, bool CfToNs, bool Diagnose = true); bool CheckObjCBridgeRelatedConversions(SourceLocation Loc, QualType DestType, QualType SrcType, Expr *&SrcExpr, bool Diagnose = true); bool ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&SrcExpr, bool Diagnose = true); bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall); /// Check whether the given new method is a valid override of the /// given overridden method, and set any properties that should be inherited. void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod, const ObjCMethodDecl *Overridden); /// Describes the compatibility of a result type with its method. enum ResultTypeCompatibilityKind { RTC_Compatible, RTC_Incompatible, RTC_Unknown }; /// Check whether the declared result type of the given Objective-C /// method declaration is compatible with the method's class. 
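///
/// For illustration (hypothetical snippet), an init method is expected to
/// return a type related to its class:
/// \code
/// @interface Widget : NSObject
/// - (instancetype)init; // RTC_Compatible with Widget
/// @end
/// \endcode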
ResultTypeCompatibilityKind checkRelatedResultTypeCompatibility(const ObjCMethodDecl *Method, const ObjCInterfaceDecl *CurrentClass); void CheckObjCMethodDirectOverrides(ObjCMethodDecl *method, ObjCMethodDecl *overridden); void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod, ObjCInterfaceDecl *CurrentClass, ResultTypeCompatibilityKind RTC); enum PragmaOptionsAlignKind { POAK_Native, // #pragma options align=native POAK_Natural, // #pragma options align=natural POAK_Packed, // #pragma options align=packed POAK_Power, // #pragma options align=power POAK_Mac68k, // #pragma options align=mac68k POAK_Reset // #pragma options align=reset }; /// ActOnPragmaClangSection - Called on well formed \#pragma clang section void ActOnPragmaClangSection(SourceLocation PragmaLoc, PragmaClangSectionAction Action, PragmaClangSectionKind SecKind, StringRef SecName); /// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align. void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind, SourceLocation PragmaLoc); /// ActOnPragmaPack - Called on well formed \#pragma pack(...). void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action, StringRef SlotLabel, Expr *Alignment); enum class PragmaPackDiagnoseKind { NonDefaultStateAtInclude, ChangedStateAtExit }; void DiagnoseNonDefaultPragmaPack(PragmaPackDiagnoseKind Kind, SourceLocation IncludeLoc); void DiagnoseUnterminatedPragmaPack(); /// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off]. void ActOnPragmaMSStruct(PragmaMSStructKind Kind); /// ActOnPragmaMSComment - Called on well formed /// \#pragma comment(kind, "arg"). void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind, StringRef Arg); /// ActOnPragmaMSPointersToMembers - called on well formed \#pragma /// pointers_to_members(representation method[, general purpose /// representation]). void ActOnPragmaMSPointersToMembers( LangOptions::PragmaMSPointersToMembersKind Kind, SourceLocation PragmaLoc); /// Called on well formed \#pragma vtordisp(). void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action, SourceLocation PragmaLoc, MSVtorDispMode Value); enum PragmaSectionKind { PSK_DataSeg, PSK_BSSSeg, PSK_ConstSeg, PSK_CodeSeg, }; bool UnifySection(StringRef SectionName, int SectionFlags, DeclaratorDecl *TheDecl); bool UnifySection(StringRef SectionName, int SectionFlags, SourceLocation PragmaSectionLocation); /// Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg. void ActOnPragmaMSSeg(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, StringLiteral *SegmentName, llvm::StringRef PragmaName); /// Called on well formed \#pragma section(). void ActOnPragmaMSSection(SourceLocation PragmaLocation, int SectionFlags, StringLiteral *SegmentName); /// Called on well-formed \#pragma init_seg(). void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation, StringLiteral *SegmentName); /// Called on #pragma clang __debug dump II void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II); /// ActOnPragmaDetectMismatch - Call on well-formed \#pragma detect_mismatch void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name, StringRef Value); /// ActOnPragmaUnused - Called on well-formed '\#pragma unused'. void ActOnPragmaUnused(const Token &Identifier, Scope *curScope, SourceLocation PragmaLoc); /// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... . 
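///
/// The pragma handled here looks like (illustrative only):
/// \code
/// #pragma GCC visibility push(hidden)
/// void internal_helper();
/// #pragma GCC visibility pop
/// \endcode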
void ActOnPragmaVisibility(const IdentifierInfo* VisType,
                           SourceLocation PragmaLoc);

NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II,
                               SourceLocation Loc);
void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W);

/// ActOnPragmaWeakID - Called on well formed \#pragma weak ident.
void ActOnPragmaWeakID(IdentifierInfo* WeakName,
                       SourceLocation PragmaLoc,
                       SourceLocation WeakNameLoc);

/// ActOnPragmaRedefineExtname - Called on well formed
/// \#pragma redefine_extname oldname newname.
void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName,
                                IdentifierInfo* AliasName,
                                SourceLocation PragmaLoc,
                                SourceLocation WeakNameLoc,
                                SourceLocation AliasNameLoc);

/// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident.
void ActOnPragmaWeakAlias(IdentifierInfo* WeakName,
                          IdentifierInfo* AliasName,
                          SourceLocation PragmaLoc,
                          SourceLocation WeakNameLoc,
                          SourceLocation AliasNameLoc);

/// ActOnPragmaFPContract - Called on well formed
/// \#pragma {STDC,OPENCL} FP_CONTRACT and
/// \#pragma clang fp contract
void ActOnPragmaFPContract(LangOptions::FPContractModeKind FPC);

/// ActOnPragmaFEnvAccess - Called on well formed
/// \#pragma STDC FENV_ACCESS
void ActOnPragmaFEnvAccess(LangOptions::FEnvAccessModeKind FPC);

/// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to
/// the record decl, to handle '\#pragma pack' and '\#pragma options align'.
void AddAlignmentAttributesForRecord(RecordDecl *RD);

/// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record.
void AddMsStructLayoutForRecord(RecordDecl *RD);

/// FreePackedContext - Deallocate and null out PackContext.
void FreePackedContext();

/// PushNamespaceVisibilityAttr - Note that we've entered a
/// namespace with a visibility attribute.
void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr,
                                 SourceLocation Loc);

/// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used,
/// add an appropriate visibility attribute.
void AddPushedVisibilityAttribute(Decl *RD);

/// PopPragmaVisibility - Pop the top element of the visibility stack; used
/// for '\#pragma GCC visibility' and visibility attributes on namespaces.
void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc);

/// FreeVisContext - Deallocate and null out VisContext.
void FreeVisContext();

/// AddCFAuditedAttribute - Check whether we're currently within
/// '\#pragma clang arc_cf_code_audited' and, if so, consider adding
/// the appropriate attribute.
void AddCFAuditedAttribute(Decl *D);

void ActOnPragmaAttributeAttribute(ParsedAttr &Attribute,
                                   SourceLocation PragmaLoc,
                                   attr::ParsedSubjectMatchRuleSet Rules);
void ActOnPragmaAttributeEmptyPush(SourceLocation PragmaLoc,
                                   const IdentifierInfo *Namespace);

/// Called on well-formed '\#pragma clang attribute pop'.
void ActOnPragmaAttributePop(SourceLocation PragmaLoc,
                             const IdentifierInfo *Namespace);

/// Adds the attributes that have been specified using the
/// '\#pragma clang attribute push' directives to the given declaration.
void AddPragmaAttributes(Scope *S, Decl *D);

void DiagnoseUnterminatedPragmaAttribute();

/// Called on well formed \#pragma clang optimize.
void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc);

/// Get the location for the currently active "\#pragma clang optimize
/// off". If this location is invalid, then the state of the pragma is "on".
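///
/// For example (illustrative source):
/// \code
/// #pragma clang optimize off
/// void not_optimized(); // treated as if marked 'optnone'
/// #pragma clang optimize on
/// \endcode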
SourceLocation getOptimizeOffPragmaLocation() const {
  return OptimizeOffPragmaLocation;
}

/// Only called on function definitions; if there is a pragma in scope
/// with the effect of a range-based optnone, consider marking the function
/// with attribute optnone.
void AddRangeBasedOptnone(FunctionDecl *FD);

/// Adds the 'optnone' attribute to the function declaration if there
/// are no conflicts; Loc represents the location causing the 'optnone'
/// attribute to be added (usually because of a pragma).
void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc);

/// AddAlignedAttr - Adds an aligned attribute to a particular declaration.
void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
                    bool IsPackExpansion);
void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, TypeSourceInfo *T,
                    bool IsPackExpansion);

/// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular
/// declaration.
void AddAssumeAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
                          Expr *OE);

/// AddAllocAlignAttr - Adds an alloc_align attribute to a particular
/// declaration.
void AddAllocAlignAttr(Decl *D, const AttributeCommonInfo &CI,
                       Expr *ParamExpr);

/// AddAlignValueAttr - Adds an align_value attribute to a particular
/// declaration.
void AddAlignValueAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E);

/// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular
/// declaration.
void AddLaunchBoundsAttr(Decl *D, const AttributeCommonInfo &CI,
                         Expr *MaxThreads, Expr *MinBlocks);

/// AddModeAttr - Adds a mode attribute to a particular declaration.
void AddModeAttr(Decl *D, const AttributeCommonInfo &CI, IdentifierInfo *Name,
                 bool InInstantiation = false);

void AddParameterABIAttr(Decl *D, const AttributeCommonInfo &CI,
                         ParameterABI ABI);

enum class RetainOwnershipKind {NS, CF, OS};
void AddXConsumedAttr(Decl *D, const AttributeCommonInfo &CI,
                      RetainOwnershipKind K, bool IsTemplateInstantiation);

/// addAMDGPUFlatWorkGroupSizeAttr - Adds an amdgpu_flat_work_group_size
/// attribute to a particular declaration.
void addAMDGPUFlatWorkGroupSizeAttr(Decl *D, const AttributeCommonInfo &CI,
                                    Expr *Min, Expr *Max);

/// addAMDGPUWavesPerEUAttr - Adds an amdgpu_waves_per_eu attribute to a
/// particular declaration.
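///
/// The attribute being added looks like (illustrative only):
/// \code
/// __attribute__((amdgpu_waves_per_eu(2, 4))) kernel void f();
/// \endcode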
void addAMDGPUWavesPerEUAttr(Decl *D, const AttributeCommonInfo &CI,
                             Expr *Min, Expr *Max);

bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type);

//===--------------------------------------------------------------------===//
// C++ Coroutines TS
//
bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc,
                             StringRef Keyword);
ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E);
StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E);

ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
                                    bool IsImplicit = false);
ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
                                      UnresolvedLookupExpr* Lookup);
ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E);
StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E,
                             bool IsImplicit = false);
StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs);
bool buildCoroutineParameterMoves(SourceLocation Loc);
VarDecl *buildCoroutinePromise(SourceLocation Loc);
void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body);
ClassTemplateDecl *lookupCoroutineTraits(SourceLocation KwLoc,
                                         SourceLocation FuncLoc);

//===--------------------------------------------------------------------===//
// OpenCL extensions.
//
private:
std::string CurrOpenCLExtension;
/// Extensions required by an OpenCL type.
llvm::DenseMap<const Type*, std::set<std::string>> OpenCLTypeExtMap;
/// Extensions required by an OpenCL declaration.
llvm::DenseMap<const Decl*, std::set<std::string>> OpenCLDeclExtMap;
public:
llvm::StringRef getCurrentOpenCLExtension() const {
  return CurrOpenCLExtension;
}

/// Check if a function declaration \p FD is associated with any
/// extensions present in OpenCLDeclExtMap and, if so, return the
/// extension(s) name(s).
std::string getOpenCLExtensionsFromDeclExtMap(FunctionDecl *FD);

/// Check if a function type \p FT is associated with any
/// extensions present in OpenCLTypeExtMap and, if so, return the
/// extension(s) name(s).
std::string getOpenCLExtensionsFromTypeExtMap(FunctionType *FT);

/// Find an extension in an appropriate extension map and return its name.
template<typename T, typename MapT>
std::string getOpenCLExtensionsFromExtMap(T* FT, MapT &Map);

void setCurrentOpenCLExtension(llvm::StringRef Ext) {
  CurrOpenCLExtension = Ext;
}

/// Set OpenCL extensions for a type which can only be used when these
/// OpenCL extensions are enabled. If \p Exts is empty, do nothing.
/// \param Exts A space separated list of OpenCL extensions.
void setOpenCLExtensionForType(QualType T, llvm::StringRef Exts);

/// Set OpenCL extensions for a declaration which can only be
/// used when these OpenCL extensions are enabled. If \p Exts is empty, do
/// nothing.
/// \param Exts A space separated list of OpenCL extensions.
void setOpenCLExtensionForDecl(Decl *FD, llvm::StringRef Exts);

/// Set current OpenCL extensions for a type which can only be used
/// when these OpenCL extensions are enabled. If current OpenCL extension is
/// empty, do nothing.
void setCurrentOpenCLExtensionForType(QualType T);

/// Set current OpenCL extensions for a declaration which
/// can only be used when these OpenCL extensions are enabled. If current
/// OpenCL extension is empty, do nothing.
void setCurrentOpenCLExtensionForDecl(Decl *FD);

bool isOpenCLDisabledDecl(Decl *FD);

/// Check if type \p T corresponding to declaration specifier \p DS
/// is disabled due to required OpenCL extensions being disabled. If so,
/// emit diagnostics.
/// \return true if the type is disabled.
bool checkOpenCLDisabledTypeDeclSpec(const DeclSpec &DS, QualType T);

/// Check if declaration \p D used by expression \p E
/// is disabled due to required OpenCL extensions being disabled. If so,
/// emit diagnostics.
/// \return true if the declaration is disabled.
bool checkOpenCLDisabledDecl(const NamedDecl &D, const Expr &E);

//===--------------------------------------------------------------------===//
// OpenMP directives and clauses.
//
private:
void *VarDataSharingAttributesStack;
/// Number of nested '#pragma omp declare target' directives.
unsigned DeclareTargetNestingLevel = 0;
/// Initialization of data-sharing attributes stack.
void InitDataSharingAttributesStack();
void DestroyDataSharingAttributesStack();
ExprResult
VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind,
                                      bool StrictlyPositive = true);
/// Returns OpenMP nesting level for current directive.
unsigned getOpenMPNestingLevel() const;

/// Adjusts the function scopes index for the target-based regions.
void adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex,
                                  unsigned Level) const;

/// Returns the number of scopes associated with the construct on the given
/// OpenMP level.
int getNumberOfConstructScopes(unsigned Level) const;

/// Push new OpenMP function region for non-capturing function.
void pushOpenMPFunctionRegion();

/// Pop OpenMP function region for non-capturing function.
void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI);

/// Check whether we're allowed to call Callee from the current function.
void checkOpenMPDeviceFunction(SourceLocation Loc, FunctionDecl *Callee,
                               bool CheckForDelayedContext = true);

/// Check whether we're allowed to call Callee from the current function.
void checkOpenMPHostFunction(SourceLocation Loc, FunctionDecl *Callee,
                             bool CheckCaller = true);

/// Check if the expression is allowed to be used in expressions for the
/// OpenMP devices.
void checkOpenMPDeviceExpr(const Expr *E);

/// Finishes analysis of the deferred function calls that may be declared as
/// host/nohost during device/host compilation.
void finalizeOpenMPDelayedAnalysis();

/// Checks if a type or a declaration is disabled due to the owning extension
/// being disabled, and emits diagnostic messages if it is disabled.
/// \param D type or declaration to be checked.
/// \param DiagLoc source location for the diagnostic message.
/// \param DiagInfo information to be emitted for the diagnostic message.
/// \param SrcRange source range of the declaration.
/// \param Map maps type or declaration to the extensions.
/// \param Selector selects diagnostic message: 0 for type and 1 for
/// declaration.
/// \return true if the type or declaration is disabled.
template <typename T, typename DiagLocT, typename DiagInfoT, typename MapT>
bool checkOpenCLDisabledTypeOrDecl(T D, DiagLocT DiagLoc, DiagInfoT DiagInfo,
                                   MapT &Map, unsigned Selector = 0,
                                   SourceRange SrcRange = SourceRange());

/// Marks all the functions that might be required for the currently active
/// OpenMP context.
void markOpenMPDeclareVariantFuncsReferenced(SourceLocation Loc,
                                             FunctionDecl *Func,
                                             bool MightBeOdrUse);

public:
/// Struct to store the context selectors info for declare variant directive.
using OMPCtxStringType = SmallString<8>;
using OMPCtxSelectorData =
    OpenMPCtxSelectorData<SmallVector<OMPCtxStringType, 4>, ExprResult>;

/// Checks if the variant/multiversion functions are compatible.
bool areMultiversionVariantFunctionsCompatible(
    const FunctionDecl *OldFD, const FunctionDecl *NewFD,
    const PartialDiagnostic &NoProtoDiagID,
    const PartialDiagnosticAt &NoteCausedDiagIDAt,
    const PartialDiagnosticAt &NoSupportDiagIDAt,
    const PartialDiagnosticAt &DiffDiagIDAt, bool TemplatesSupported,
    bool ConstexprSupported, bool CLinkageMayDiffer);

/// Tries to capture a lambda's captured variables in the OpenMP region
/// before the original lambda is captured.
void tryCaptureOpenMPLambdas(ValueDecl *V);

/// Return true if the provided declaration \a D should be captured by
/// reference.
/// \param Level Relative level of nested OpenMP construct for which the check
/// is performed.
/// \param OpenMPCaptureLevel Capture level within an OpenMP construct.
bool isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level,
                           unsigned OpenMPCaptureLevel) const;

/// Check if the specified variable is used in one of the private
/// clauses (private, firstprivate, lastprivate, reduction, etc.) in OpenMP
/// constructs.
VarDecl *isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo = false,
                              unsigned StopAt = 0);
ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK,
                                 ExprObjectKind OK, SourceLocation Loc);

/// If the current region is a loop-based region, mark the start of the loop
/// construct.
void startOpenMPLoop();

/// If the current region is a range loop-based region, mark the start of the
/// loop construct.
void startOpenMPCXXRangeFor();

/// Check if the specified variable is used in 'private' clause.
/// \param Level Relative level of nested OpenMP construct for which the check
/// is performed.
bool isOpenMPPrivateDecl(const ValueDecl *D, unsigned Level) const;

/// Sets OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map, etc.)
/// for \p FD based on DSA for the provided corresponding captured declaration
/// \p D.
void setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, unsigned Level);

/// Check if the specified variable is captured by 'target' directive.
/// \param Level Relative level of nested OpenMP construct for which the check
/// is performed.
bool isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level) const;

ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc,
                                                  Expr *Op);

/// Called on start of new data sharing attribute block.
void StartOpenMPDSABlock(OpenMPDirectiveKind K,
                         const DeclarationNameInfo &DirName, Scope *CurScope,
                         SourceLocation Loc);
/// Start analysis of clauses.
void StartOpenMPClause(OpenMPClauseKind K);
/// End analysis of clauses.
void EndOpenMPClause();
/// Called on end of data sharing attribute block.
void EndOpenMPDSABlock(Stmt *CurDirective);

/// Check if the current region is an OpenMP loop region and if it is,
/// mark loop control variable, used in \p Init for loop initialization, as
/// private by default.
/// \param Init First part of the for loop.
void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init);

// OpenMP directives and clauses.
/// Called on correct id-expression from the '#pragma omp
/// threadprivate'.
ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec,
                                   const DeclarationNameInfo &Id,
                                   OpenMPDirectiveKind Kind);
/// Called on well-formed '#pragma omp threadprivate'.
DeclGroupPtrTy ActOnOpenMPThreadprivateDirective(SourceLocation Loc,
                                                 ArrayRef<Expr *> VarList);
/// Builds a new OpenMPThreadPrivateDecl and checks its correctness.
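///
/// The directive being modeled (illustrative):
/// \code
/// int counter;
/// #pragma omp threadprivate(counter)
/// \endcode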
OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc,
                                                ArrayRef<Expr *> VarList);
/// Called on well-formed '#pragma omp allocate'.
DeclGroupPtrTy ActOnOpenMPAllocateDirective(SourceLocation Loc,
                                            ArrayRef<Expr *> VarList,
                                            ArrayRef<OMPClause *> Clauses,
                                            DeclContext *Owner = nullptr);
/// Called on well-formed '#pragma omp requires'.
DeclGroupPtrTy ActOnOpenMPRequiresDirective(SourceLocation Loc,
                                            ArrayRef<OMPClause *> ClauseList);
/// Check restrictions on the Requires directive.
OMPRequiresDecl *CheckOMPRequiresDecl(SourceLocation Loc,
                                      ArrayRef<OMPClause *> Clauses);
/// Check if the specified type is allowed to be used in 'omp declare
/// reduction' construct.
QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc,
                                         TypeResult ParsedType);
/// Called on start of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart(
    Scope *S, DeclContext *DC, DeclarationName Name,
    ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes,
    AccessSpecifier AS, Decl *PrevDeclInScope = nullptr);
/// Initialize declare reduction construct combiner.
void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D);
/// Finish current declare reduction construct combiner.
void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner);
/// Initialize declare reduction construct initializer.
/// \return omp_priv variable.
VarDecl *ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D);
/// Finish current declare reduction construct initializer.
void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer,
                                               VarDecl *OmpPrivParm);
/// Called at the end of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd(
    Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid);

/// Check variable declaration in 'omp declare mapper' construct.
TypeResult ActOnOpenMPDeclareMapperVarDecl(Scope *S, Declarator &D);
/// Check if the specified type is allowed to be used in 'omp declare
/// mapper' construct.
QualType ActOnOpenMPDeclareMapperType(SourceLocation TyLoc,
                                      TypeResult ParsedType);
/// Called on start of '#pragma omp declare mapper'.
OMPDeclareMapperDecl *ActOnOpenMPDeclareMapperDirectiveStart(
    Scope *S, DeclContext *DC, DeclarationName Name, QualType MapperType,
    SourceLocation StartLoc, DeclarationName VN, AccessSpecifier AS,
    Decl *PrevDeclInScope = nullptr);
/// Build the mapper variable of '#pragma omp declare mapper'.
void ActOnOpenMPDeclareMapperDirectiveVarDecl(OMPDeclareMapperDecl *DMD,
                                              Scope *S, QualType MapperType,
                                              SourceLocation StartLoc,
                                              DeclarationName VN);
/// Called at the end of '#pragma omp declare mapper'.
DeclGroupPtrTy
ActOnOpenMPDeclareMapperDirectiveEnd(OMPDeclareMapperDecl *D, Scope *S,
                                     ArrayRef<OMPClause *> ClauseList);

/// Called on the start of a target region, i.e. '#pragma omp declare target'.
bool ActOnStartOpenMPDeclareTargetDirective(SourceLocation Loc);
/// Called at the end of a target region, i.e.
/// '#pragma omp end declare target'.
void ActOnFinishOpenMPDeclareTargetDirective();
/// Searches for the provided declaration name for OpenMP declare target
/// directive.
NamedDecl *
lookupOpenMPDeclareTargetName(Scope *CurScope, CXXScopeSpec &ScopeSpec,
                              const DeclarationNameInfo &Id,
                              NamedDeclSetType &SameDirectiveDecls);
/// Called on correct id-expression from the '#pragma omp declare target'.
void ActOnOpenMPDeclareTargetName(NamedDecl *ND, SourceLocation Loc,
                                  OMPDeclareTargetDeclAttr::MapTypeTy MT,
                                  OMPDeclareTargetDeclAttr::DevTypeTy DT);
/// Check declaration inside target region.
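///
/// For example (illustrative):
/// \code
/// #pragma omp declare target
/// int device_data[64]; // declarations here are checked for device use
/// #pragma omp end declare target
/// \endcode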
void checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D,
                                      SourceLocation IdLoc = SourceLocation());
/// Return true if we are inside an OpenMP declare target region.
bool isInOpenMPDeclareTargetContext() const {
  return DeclareTargetNestingLevel > 0;
}
/// Return true if we are inside an OpenMP target region.
bool isInOpenMPTargetExecutionDirective() const;

/// Return the number of captured regions created for an OpenMP directive.
static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind);

/// Initialization of captured region for OpenMP region.
void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope);

/// End of OpenMP region.
///
/// \param S Statement associated with the current OpenMP region.
/// \param Clauses List of clauses for the current OpenMP region.
///
/// \returns Statement for finished OpenMP region.
StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses);
StmtResult ActOnOpenMPExecutableDirective(
    OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName,
    OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses,
    Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses,
                                        Stmt *AStmt,
                                        SourceLocation StartLoc,
                                        SourceLocation EndLoc);
using VarsWithInheritedDSAType =
    llvm::SmallDenseMap<const ValueDecl *, const Expr *, 4>;
/// Called on well-formed '\#pragma omp simd' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
                         SourceLocation StartLoc, SourceLocation EndLoc,
                         VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp for' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
                        SourceLocation StartLoc, SourceLocation EndLoc,
                        VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp for simd' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
                            SourceLocation StartLoc, SourceLocation EndLoc,
                            VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp sections' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses,
                                        Stmt *AStmt, SourceLocation StartLoc,
                                        SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp section' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc,
                                       SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp single' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses,
                                      Stmt *AStmt, SourceLocation StartLoc,
                                      SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp master' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc,
                                      SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp critical' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName,
                                        ArrayRef<OMPClause *> Clauses,
                                        Stmt *AStmt, SourceLocation StartLoc,
                                        SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel for' after parsing
/// of the associated statement.
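///
/// The construct handled here (illustrative):
/// \code
/// #pragma omp parallel for
/// for (int i = 0; i < n; ++i)
///   a[i] = b[i] + c[i];
/// \endcode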
StmtResult ActOnOpenMPParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel for simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel master' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelMasterDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp parallel sections' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp task' after parsing of the /// associated statement. StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskyield'. StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp barrier'. StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskwait'. StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskgroup'. StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp flush'. StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp ordered' after parsing of the /// associated statement. StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp atomic' after parsing of the /// associated statement. StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target' after parsing of the /// associated statement. StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target data' after parsing of /// the associated statement. StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target enter data' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp target exit data' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp target parallel' after /// parsing of the associated statement. 
StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target parallel for' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams' after parsing of the /// associated statement. StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp cancellation point'. StmtResult ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// Called on well-formed '\#pragma omp cancel'. StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// Called on well-formed '\#pragma omp taskloop' after parsing of the /// associated statement. StmtResult ActOnOpenMPTaskLoopDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp taskloop simd' after parsing of /// the associated statement. StmtResult ActOnOpenMPTaskLoopSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp master taskloop' after parsing of the /// associated statement. StmtResult ActOnOpenMPMasterTaskLoopDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp master taskloop simd' after parsing of /// the associated statement. StmtResult ActOnOpenMPMasterTaskLoopSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel master taskloop' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelMasterTaskLoopDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel master taskloop simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelMasterTaskLoopSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute' after parsing /// of the associated statement. StmtResult ActOnOpenMPDistributeDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target update'. StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp distribute parallel for' after /// parsing of the associated statement. 
StmtResult ActOnOpenMPDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute parallel for simd' /// after parsing of the associated statement. StmtResult ActOnOpenMPDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target parallel for simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target simd' after parsing of /// the associated statement. StmtResult ActOnOpenMPTargetSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute' after parsing of /// the associated statement. StmtResult ActOnOpenMPTeamsDistributeDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPTeamsDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute parallel for simd' /// after parsing of the associated statement. StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute parallel for' /// after parsing of the associated statement. StmtResult ActOnOpenMPTeamsDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams' after parsing of the /// associated statement. StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target teams distribute' after parsing /// of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute parallel for' /// after parsing of the associated statement. 
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute parallel for /// simd' after parsing of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Checks correctness of linear modifiers. bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind, SourceLocation LinLoc); /// Checks that the specified declaration matches requirements for the linear /// decls. bool CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc, OpenMPLinearClauseKind LinKind, QualType Type); /// Called on well-formed '\#pragma omp declare simd' after parsing of /// the associated method/function. DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective( DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS, Expr *Simdlen, ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds, ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears, ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR); /// Checks '\#pragma omp declare variant' variant function and original /// functions after parsing of the associated method/function. /// \param DG Function declaration to which the declare variant directive is /// applied. /// \param VariantRef Expression that references the variant function, which /// must be used instead of the original one, specified in \p DG. /// \returns None, if the function/variant function are not compatible with /// the pragma, pair of original function/variant ref expression otherwise. Optional<std::pair<FunctionDecl *, Expr *>> checkOpenMPDeclareVariantFunction( DeclGroupPtrTy DG, Expr *VariantRef, SourceRange SR); /// Called on well-formed '\#pragma omp declare variant' after parsing of /// the associated method/function. /// \param FD Function declaration to which the declare variant directive is /// applied. /// \param VariantRef Expression that references the variant function, which /// must be used instead of the original one, specified in \p DG. /// \param Data Set of context-specific data for the specified context /// selector. void ActOnOpenMPDeclareVariantDirective(FunctionDecl *FD, Expr *VariantRef, SourceRange SR, ArrayRef<OMPCtxSelectorData> Data); OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'allocator' clause. OMPClause *ActOnOpenMPAllocatorClause(Expr *Allocator, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'if' clause. OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier, Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation NameModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc);
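// A hedged sketch (not part of the original header): how a parser might
// build an 'if(parallel : Cond)' clause with ActOnOpenMPIfClause above;
// `Actions` (a Sema &), `Cond` and the SourceLocations are assumed names:
//   OMPClause *IfC = Actions.ActOnOpenMPIfClause(
//       OMPD_parallel, Cond, StartLoc, LParenLoc, NameModLoc, ColonLoc,
//       EndLoc);
/// Called on well-formed 'final' clause.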
OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'num_threads' clause. OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'safelen' clause. OMPClause *ActOnOpenMPSafelenClause(Expr *Length, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'simdlen' clause. OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'collapse' clause. OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'ordered' clause. OMPClause * ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc, SourceLocation LParenLoc = SourceLocation(), Expr *NumForLoops = nullptr); /// Called on well-formed 'grainsize' clause. OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'num_tasks' clause. OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'hint' clause. OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind, unsigned Argument, SourceLocation ArgumentLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'default' clause. OMPClause *ActOnOpenMPDefaultClause(OpenMPDefaultClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'proc_bind' clause. OMPClause *ActOnOpenMPProcBindClause(OpenMPProcBindClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPSingleExprWithArgClause( OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc, SourceLocation EndLoc); /// Called on well-formed 'schedule' clause. OMPClause *ActOnOpenMPScheduleClause( OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2, OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc, SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'nowait' clause. OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'untied' clause. OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'mergeable' clause. OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'read' clause. OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'write' clause. OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'update' clause. 
OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'capture' clause. OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'seq_cst' clause. OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'threads' clause. OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'simd' clause. OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'nogroup' clause. OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'unified_address' clause. OMPClause *ActOnOpenMPUnifiedAddressClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'unified_shared_memory' clause. OMPClause *ActOnOpenMPUnifiedSharedMemoryClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'reverse_offload' clause. OMPClause *ActOnOpenMPReverseOffloadClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'dynamic_allocators' clause. OMPClause *ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'atomic_default_mem_order' clause. OMPClause *ActOnOpenMPAtomicDefaultMemOrderClause( OpenMPAtomicDefaultMemOrderClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPVarListClause( OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *TailExpr, const OMPVarListLocTy &Locs, SourceLocation ColonLoc, CXXScopeSpec &ReductionOrMapperIdScopeSpec, DeclarationNameInfo &ReductionOrMapperId, OpenMPDependClauseKind DepKind, OpenMPLinearClauseKind LinKind, ArrayRef<OpenMPMapModifierKind> MapTypeModifiers, ArrayRef<SourceLocation> MapTypeModifiersLoc, OpenMPMapClauseKind MapType, bool IsMapTypeImplicit, SourceLocation DepLinMapLoc); /// Called on well-formed 'allocate' clause. OMPClause * ActOnOpenMPAllocateClause(Expr *Allocator, ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation ColonLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'private' clause. OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'firstprivate' clause. OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'lastprivate' clause. OMPClause *ActOnOpenMPLastprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'shared' clause. OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'reduction' clause. OMPClause *ActOnOpenMPReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None);
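// A minimal sketch (an assumption, not original documentation): a typical
// call for a parsed 'reduction(+ : a, b)' clause; `Actions`, `Vars`, the
// SourceLocations and the reduction-id arguments are assumed to be in scope:
//   OMPClause *Red = Actions.ActOnOpenMPReductionClause(
//       Vars, StartLoc, LParenLoc, ColonLoc, EndLoc, ReductionIdScopeSpec,
//       ReductionId);
/// Called on well-formed 'task_reduction' clause.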
OMPClause *ActOnOpenMPTaskReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'in_reduction' clause. OMPClause *ActOnOpenMPInReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'linear' clause. OMPClause * ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step, SourceLocation StartLoc, SourceLocation LParenLoc, OpenMPLinearClauseKind LinKind, SourceLocation LinLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'aligned' clause. OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList, Expr *Alignment, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'copyin' clause. OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'copyprivate' clause. OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'flush' pseudo clause. OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'depend' clause. OMPClause * ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind, SourceLocation DepLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'device' clause. OMPClause *ActOnOpenMPDeviceClause(Expr *Device, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'map' clause. OMPClause * ActOnOpenMPMapClause(ArrayRef<OpenMPMapModifierKind> MapTypeModifiers, ArrayRef<SourceLocation> MapTypeModifiersLoc, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, OpenMPMapClauseKind MapType, bool IsMapTypeImplicit, SourceLocation MapLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'num_teams' clause. OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'thread_limit' clause. OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'priority' clause. OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'dist_schedule' clause. OMPClause *ActOnOpenMPDistScheduleClause( OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc); /// Called on well-formed 'defaultmap' clause. 
OMPClause *ActOnOpenMPDefaultmapClause( OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc, SourceLocation KindLoc, SourceLocation EndLoc); /// Called on well-formed 'to' clause. OMPClause * ActOnOpenMPToClause(ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'from' clause. OMPClause *ActOnOpenMPFromClause( ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'use_device_ptr' clause. OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs); /// Called on well-formed 'is_device_ptr' clause. OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs); /// The kind of conversion being performed. enum CheckedConversionKind { /// An implicit conversion. CCK_ImplicitConversion, /// A C-style cast. CCK_CStyleCast, /// A functional-style cast. CCK_FunctionalCast, /// A cast other than a C-style cast. CCK_OtherCast, /// A conversion for an operand of a builtin overloaded operator. CCK_ForBuiltinOverloadedOp }; static bool isCast(CheckedConversionKind CCK) { return CCK == CCK_CStyleCast || CCK == CCK_FunctionalCast || CCK == CCK_OtherCast; } /// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit /// cast. If there is already an implicit cast, merge into the existing one. /// If isLvalue, the result of the cast is an lvalue. ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK, ExprValueKind VK = VK_RValue, const CXXCastPath *BasePath = nullptr, CheckedConversionKind CCK = CCK_ImplicitConversion); /// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding /// to the conversion from scalar type ScalarTy to the Boolean type. static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy); /// IgnoredValueConversions - Given that an expression's result is /// syntactically ignored, perform any conversions that are /// required. ExprResult IgnoredValueConversions(Expr *E); // UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts // functions and arrays to their respective pointers (C99 6.3.2.1). ExprResult UsualUnaryConversions(Expr *E); /// CallExprUnaryConversions - a special case of a unary conversion /// performed on a function designator of a call expression. ExprResult CallExprUnaryConversions(Expr *E); // DefaultFunctionArrayConversion - converts functions and arrays // to their respective pointers (C99 6.3.2.1). ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true); // DefaultFunctionArrayLvalueConversion - converts functions and // arrays to their respective pointers and performs the // lvalue-to-rvalue conversion. ExprResult DefaultFunctionArrayLvalueConversion(Expr *E, bool Diagnose = true); // DefaultLvalueConversion - performs lvalue-to-rvalue conversion on // the operand. This is DefaultFunctionArrayLvalueConversion, // except that it assumes the operand isn't of function or array // type. ExprResult DefaultLvalueConversion(Expr *E);
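// A hedged sketch (not part of the original header): inserting an implicit
// integral cast via ImpCastExprToType above; `Actions` (a Sema &) and `E`
// (an Expr *) are assumed names:
//   ExprResult Cast = Actions.ImpCastExprToType(E, Actions.Context.IntTy,
//                                               CK_IntegralCast);
// DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that
// do not have a prototype. Integer promotions are performed on each
// argument, and arguments that have type float are promoted to double.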
ExprResult DefaultArgumentPromotion(Expr *E); /// If \p E is a prvalue denoting an unmaterialized temporary, materialize /// it as an xvalue. In C++98, the result will still be a prvalue, because /// we don't have xvalues there. ExprResult TemporaryMaterializationConversion(Expr *E); // Used for emitting the right warning by DefaultVariadicArgumentPromotion enum VariadicCallType { VariadicFunction, VariadicBlock, VariadicMethod, VariadicConstructor, VariadicDoesNotApply }; VariadicCallType getVariadicCallType(FunctionDecl *FDecl, const FunctionProtoType *Proto, Expr *Fn); // Used for determining in which context a type is allowed to be passed to a // vararg function. enum VarArgKind { VAK_Valid, VAK_ValidInCXX11, VAK_Undefined, VAK_MSVCUndefined, VAK_Invalid }; // Determines which VarArgKind fits an expression. VarArgKind isValidVarArgType(const QualType &Ty); /// Check to see if the given expression is a valid argument to a variadic /// function, issuing a diagnostic if not. void checkVariadicArgument(const Expr *E, VariadicCallType CT); /// Check to see if a given expression could have '.c_str()' called on it. bool hasCStrMethod(const Expr *E); /// GatherArgumentsForCall - Collects argument expressions for various /// forms of call prototypes. bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl, const FunctionProtoType *Proto, unsigned FirstParam, ArrayRef<Expr *> Args, SmallVectorImpl<Expr *> &AllArgs, VariadicCallType CallType = VariadicDoesNotApply, bool AllowExplicit = false, bool IsListInitialization = false); // DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but // will create a runtime trap if the resulting type is not a POD type. ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT, FunctionDecl *FDecl); // UsualArithmeticConversions - performs the UsualUnaryConversions on its // operands and then handles various conversions that are common to binary // operators (C99 6.3.1.8). If both operands aren't arithmetic, this // routine returns the first non-arithmetic type found. The client is // responsible for emitting appropriate error diagnostics. QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS, bool IsCompAssign = false); /// AssignConvertType - All of the 'assignment' semantic checks return this /// enum to indicate whether the assignment was allowed. These checks are /// done for simple assignments, as well as initialization, return from /// function, argument passing, etc. The query is phrased in terms of a /// source and destination type. enum AssignConvertType { /// Compatible - the types are compatible according to the standard. Compatible, /// PointerToInt - The assignment converts a pointer to an int, which we /// accept as an extension. PointerToInt, /// IntToPointer - The assignment converts an int to a pointer, which we /// accept as an extension. IntToPointer, /// FunctionVoidPointer - The assignment is between a function pointer and /// void*, which the standard doesn't allow, but we accept as an extension. FunctionVoidPointer, /// IncompatiblePointer - The assignment is between two pointer types that /// are not compatible, but we accept them as an extension. IncompatiblePointer, /// IncompatiblePointerSign - The assignment is between two pointer types /// which point to integers which have a different sign, but are otherwise /// identical. This is a subset of the above, but broken out because it's by /// far the most common case of incompatible pointers.
IncompatiblePointerSign, /// CompatiblePointerDiscardsQualifiers - The assignment discards /// c/v/r qualifiers, which we accept as an extension. CompatiblePointerDiscardsQualifiers, /// IncompatiblePointerDiscardsQualifiers - The assignment /// discards qualifiers that we don't permit to be discarded, /// like address spaces. IncompatiblePointerDiscardsQualifiers, /// IncompatibleNestedPointerAddressSpaceMismatch - The assignment /// changes address spaces in nested pointer types which is not allowed. /// For instance, converting __private int ** to __generic int ** is /// illegal even though __private could be converted to __generic. IncompatibleNestedPointerAddressSpaceMismatch, /// IncompatibleNestedPointerQualifiers - The assignment is between two /// nested pointer types, and the qualifiers other than the first two /// levels differ e.g. char ** -> const char **, but we accept them as an /// extension. IncompatibleNestedPointerQualifiers, /// IncompatibleVectors - The assignment is between two vector types that /// have the same size, which we accept as an extension. IncompatibleVectors, /// IntToBlockPointer - The assignment converts an int to a block /// pointer. We disallow this. IntToBlockPointer, /// IncompatibleBlockPointer - The assignment is between two block /// pointer types that are not compatible. IncompatibleBlockPointer, /// IncompatibleObjCQualifiedId - The assignment is between a qualified /// id type and something else (that is incompatible with it). For example, /// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol. IncompatibleObjCQualifiedId, /// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an /// object with __weak qualifier. IncompatibleObjCWeakRef, /// Incompatible - We reject this conversion outright; it is invalid to /// represent it in the AST. Incompatible }; /// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the /// assignment conversion type specified by ConvTy. This returns true if the /// conversion was invalid or false if the conversion was accepted. bool DiagnoseAssignmentResult(AssignConvertType ConvTy, SourceLocation Loc, QualType DstType, QualType SrcType, Expr *SrcExpr, AssignmentAction Action, bool *Complained = nullptr); /// IsValueInFlagEnum - Determine if a value is allowed as part of a flag /// enum. If AllowMask is true, then we also allow the complement of a valid /// value, to be used as a mask. bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val, bool AllowMask) const; /// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant /// integer not in the range of enum values. void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType, Expr *SrcExpr); /// CheckAssignmentConstraints - Perform type checking for assignment, /// argument passing, variable initialization, and function return values. /// C99 6.5.16. AssignConvertType CheckAssignmentConstraints(SourceLocation Loc, QualType LHSType, QualType RHSType); /// Check assignment constraints and optionally prepare for a conversion of /// the RHS to the LHS type. The conversion is prepared for if ConvertRHS /// is true. AssignConvertType CheckAssignmentConstraints(QualType LHSType, ExprResult &RHS, CastKind &Kind, bool ConvertRHS = true);
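// Sketch (assumed usage, not from the original header): acting on the result
// of an assignment check with DiagnoseAssignmentResult above; `Actions`,
// `ConvTy`, `Loc`, `DstTy`, `SrcTy` and `SrcE` are assumed names:
//   if (Actions.DiagnoseAssignmentResult(ConvTy, Loc, DstTy, SrcTy, SrcE,
//                                        AA_Assigning))
//     return ExprError(); // true means the conversion was invalid
/// Check assignment constraints for an assignment of RHS to LHSType.
///
/// \param LHSType The destination type for the assignment.
/// \param RHS The source expression for the assignment.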
/// \param Diagnose If \c true, diagnostics may be produced when checking /// for assignability. If a diagnostic is produced, \p RHS will be /// set to ExprError(). Note that this function may still return /// without producing a diagnostic, even for an invalid assignment. /// \param DiagnoseCFAudited If \c true, the target is a function parameter /// in an audited Core Foundation API and does not need to be checked /// for ARC retain issues. /// \param ConvertRHS If \c true, \p RHS will be updated to model the /// conversions necessary to perform the assignment. If \c false, /// \p Diagnose must also be \c false. AssignConvertType CheckSingleAssignmentConstraints( QualType LHSType, ExprResult &RHS, bool Diagnose = true, bool DiagnoseCFAudited = false, bool ConvertRHS = true); // If the lhs type is a transparent union, check whether we // can initialize the transparent union with the given expression. AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType, ExprResult &RHS); bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType); bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit = false); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit, ImplicitConversionSequence& ICS); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const ImplicitConversionSequence& ICS, AssignmentAction Action, CheckedConversionKind CCK = CCK_ImplicitConversion); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const StandardConversionSequence& SCS, AssignmentAction Action, CheckedConversionKind CCK); ExprResult PerformQualificationConversion( Expr *E, QualType Ty, ExprValueKind VK = VK_RValue, CheckedConversionKind CCK = CCK_ImplicitConversion); /// the following "Check" methods will return a valid/converted QualType /// or a null QualType (indicating an error diagnostic was issued). /// type checking binary operators (subroutines of CreateBuiltinBinOp). 
QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType CheckPointerToMemberOperands( // C++ 5.5 ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, SourceLocation OpLoc, bool isIndirect); QualType CheckMultiplyDivideOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool IsDivide); QualType CheckRemainderOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign = false); QualType CheckAdditionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc, QualType* CompLHSTy = nullptr); QualType CheckSubtractionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, QualType* CompLHSTy = nullptr); QualType CheckShiftOperands( // C99 6.5.7 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc, bool IsCompAssign = false); void CheckPtrComparisonWithNullChar(ExprResult &E, ExprResult &NullE); QualType CheckCompareOperands( // C99 6.5.8/9 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckBitwiseOperands( // C99 6.5.[10...12] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckLogicalOperands( // C99 6.5.[13,14] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); // CheckAssignmentOperands is used for both simple and compound assignment. // For simple assignment, pass both expressions and a null converted type. // For compound assignment, pass both expressions and the converted type. QualType CheckAssignmentOperands( // C99 6.5.16.[1,2] Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType); ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opcode, Expr *Op); ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opcode, Expr *LHS, Expr *RHS); ExprResult checkPseudoObjectRValue(Expr *E); Expr *recreateSyntacticForm(PseudoObjectExpr *E); QualType CheckConditionalOperands( // C99 6.5.15 ExprResult &Cond, ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc); QualType CXXCheckConditionalOperands( // C++ 5.16 ExprResult &cond, ExprResult &lhs, ExprResult &rhs, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc); QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2, bool ConvertArgs = true); QualType FindCompositePointerType(SourceLocation Loc, ExprResult &E1, ExprResult &E2, bool ConvertArgs = true) { Expr *E1Tmp = E1.get(), *E2Tmp = E2.get(); QualType Composite = FindCompositePointerType(Loc, E1Tmp, E2Tmp, ConvertArgs); E1 = E1Tmp; E2 = E2Tmp; return Composite; } QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS, SourceLocation QuestionLoc); bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr, SourceLocation QuestionLoc); void DiagnoseAlwaysNonNullPointer(Expr *E, Expr::NullPointerConstantKind NullType, bool IsEqual, SourceRange Range); /// type checking for vector binary operators. 
QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool AllowBothBool, bool AllowBoolConversion); QualType GetSignedVectorType(QualType V); QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc); bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType); bool isLaxVectorConversion(QualType srcType, QualType destType); /// type checking declaration initializers (C99 6.7.8) bool CheckForConstantInitializer(Expr *e, QualType t); // type checking C++ declaration initializers (C++ [dcl.init]). /// ReferenceCompareResult - Expresses the result of comparing two /// types (cv1 T1 and cv2 T2) to determine their compatibility for the /// purposes of initialization by reference (C++ [dcl.init.ref]p4). enum ReferenceCompareResult { /// Ref_Incompatible - The two types are incompatible, so direct /// reference binding is not possible. Ref_Incompatible = 0, /// Ref_Related - The two types are reference-related, which means /// that their unqualified forms (T1 and T2) are either the same /// or T1 is a base class of T2. Ref_Related, /// Ref_Compatible - The two types are reference-compatible. Ref_Compatible }; ReferenceCompareResult CompareReferenceRelationship(SourceLocation Loc, QualType T1, QualType T2, bool &DerivedToBase, bool &ObjCConversion, bool &ObjCLifetimeConversion, bool &FunctionConversion); ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType, Expr *CastExpr, CastKind &CastKind, ExprValueKind &VK, CXXCastPath &Path); /// Force an expression with unknown-type to an expression of the /// given type. ExprResult forceUnknownAnyToType(Expr *E, QualType ToType); /// Type-check an expression that's being passed to an /// __unknown_anytype parameter. ExprResult checkUnknownAnyArg(SourceLocation callLoc, Expr *result, QualType &paramType); // CheckVectorCast - check type constraints for vectors. // Since vectors are an extension, there is no C standard reference for this. // We allow casting between vectors and integer datatypes of the same size. // returns true if the cast is invalid bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty, CastKind &Kind); /// Prepare `SplattedExpr` for a vector splat operation, adding /// implicit casts if necessary. ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr); // CheckExtVectorCast - check type constraints for extended vectors. // Since vectors are an extension, there is no C standard reference for this. // We allow casting between vectors and integer datatypes of the same size, // or vectors and the element type of that vector. // returns the cast expr ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr, CastKind &Kind); ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type, SourceLocation LParenLoc, Expr *CastExpr, SourceLocation RParenLoc); enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error };
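// A hedged sketch (not part of the original header): rejecting an invalid
// vector reinterpretation via CheckVectorCast above, which returns true when
// the cast is invalid; `Actions`, `R`, `VecTy` and `ScalarTy` are assumed:
//   CastKind CK;
//   if (Actions.CheckVectorCast(R, VecTy, ScalarTy, CK))
//     return ExprError();
/// Checks for invalid conversions and casts between
/// retainable pointers and other pointer kinds for ARC and Weak.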
ARCConversionResult CheckObjCConversion(SourceRange castRange, QualType castType, Expr *&op, CheckedConversionKind CCK, bool Diagnose = true, bool DiagnoseCFAudited = false, BinaryOperatorKind Opc = BO_PtrMemD ); Expr *stripARCUnbridgedCast(Expr *e); void diagnoseARCUnbridgedCast(Expr *e); bool CheckObjCARCUnavailableWeakConversion(QualType castType, QualType ExprType); /// checkRetainCycles - Check whether an Objective-C message send /// might create an obvious retain cycle. void checkRetainCycles(ObjCMessageExpr *msg); void checkRetainCycles(Expr *receiver, Expr *argument); void checkRetainCycles(VarDecl *Var, Expr *Init); /// checkUnsafeAssigns - Check whether +1 expr is being assigned /// to weak/__unsafe_unretained type. bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS); /// checkUnsafeExprAssigns - Check whether +1 expr is being assigned /// to weak/__unsafe_unretained expression. void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS); /// CheckMessageArgumentTypes - Check types in an Obj-C message send. /// \param Method - May be null. /// \param [out] ReturnType - The return type of the send. /// \return true iff there were any incompatible types. bool CheckMessageArgumentTypes(const Expr *Receiver, QualType ReceiverType, MultiExprArg Args, Selector Sel, ArrayRef<SourceLocation> SelectorLocs, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage, SourceLocation lbrac, SourceLocation rbrac, SourceRange RecRange, QualType &ReturnType, ExprValueKind &VK); /// Determine the result of a message send expression based on /// the type of the receiver, the method expected to receive the message, /// and the form of the message send. QualType getMessageSendResultType(const Expr *Receiver, QualType ReceiverType, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage); /// If the given expression involves a message send to a method /// with a related result type, emit a note describing what happened. void EmitRelatedResultTypeNote(const Expr *E); /// Given that we had incompatible pointer types in a return /// statement, check whether we're in a method with a related result /// type, and if so, emit a note describing what happened. void EmitRelatedResultTypeNoteForReturn(QualType destType); class ConditionResult { Decl *ConditionVar; FullExprArg Condition; bool Invalid; bool HasKnownValue; bool KnownValue; friend class Sema; ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition, bool IsConstexpr) : ConditionVar(ConditionVar), Condition(Condition), Invalid(false), HasKnownValue(IsConstexpr && Condition.get() && !Condition.get()->isValueDependent()), KnownValue(HasKnownValue && !!Condition.get()->EvaluateKnownConstInt(S.Context)) {} explicit ConditionResult(bool Invalid) : ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid), HasKnownValue(false), KnownValue(false) {} public: ConditionResult() : ConditionResult(false) {} bool isInvalid() const { return Invalid; } std::pair<VarDecl *, Expr *> get() const { return std::make_pair(cast_or_null<VarDecl>(ConditionVar), Condition.get()); } llvm::Optional<bool> getKnownValue() const { if (!HasKnownValue) return None; return KnownValue; } }; static ConditionResult ConditionError() { return ConditionResult(true); } enum class ConditionKind { Boolean, ///< A boolean condition, from 'if', 'while', 'for', or 'do'. ConstexprIf, ///< A constant boolean condition from 'if constexpr'. Switch ///< An integral condition for a 'switch' statement. 
}; ConditionResult ActOnCondition(Scope *S, SourceLocation Loc, Expr *SubExpr, ConditionKind CK); ConditionResult ActOnConditionVariable(Decl *ConditionVar, SourceLocation StmtLoc, ConditionKind CK); DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D); ExprResult CheckConditionVariable(VarDecl *ConditionVar, SourceLocation StmtLoc, ConditionKind CK); ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond); /// CheckBooleanCondition - Diagnose problems involving the use of /// the given expression as a boolean condition (e.g. in an if /// statement). Also performs the standard function and array /// decays, possibly changing the input variable. /// /// \param Loc - A location associated with the condition, e.g. the /// 'if' keyword. /// \return true iff there were any errors ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E, bool IsConstexpr = false); /// ActOnExplicitBoolSpecifier - Build an ExplicitSpecifier from an expression /// found in an explicit(bool) specifier. ExplicitSpecifier ActOnExplicitBoolSpecifier(Expr *E); /// tryResolveExplicitSpecifier - Attempt to resolve the explicit specifier. /// Returns true if the explicit specifier is now resolved. bool tryResolveExplicitSpecifier(ExplicitSpecifier &ExplicitSpec); /// DiagnoseAssignmentAsCondition - Given that an expression is /// being used as a boolean condition, warn if it's an assignment. void DiagnoseAssignmentAsCondition(Expr *E); /// Redundant parentheses over an equality comparison can indicate /// that the user intended an assignment used as condition. void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE); /// CheckCXXBooleanCondition - Returns true if conversion to bool is invalid. ExprResult CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr = false); /// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have /// the specified width and sign. If an overflow occurs, detect it and emit /// the specified diagnostic. void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal, unsigned NewWidth, bool NewSign, SourceLocation Loc, unsigned DiagID); /// Checks that the Objective-C declaration is declared in the global scope. /// Emits an error and marks the declaration as invalid if it's not declared /// in the global scope. bool CheckObjCDeclScope(Decl *D); /// Abstract base class used for diagnosing integer constant /// expression violations. class VerifyICEDiagnoser { public: bool Suppress; VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { } virtual void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) =0; virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR); virtual ~VerifyICEDiagnoser() { } }; /// VerifyIntegerConstantExpression - Verifies that an expression is an ICE, /// and reports the appropriate diagnostics. Returns false on success. /// Can optionally return the value of the expression. ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result, VerifyICEDiagnoser &Diagnoser, bool AllowFold = true); ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result, unsigned DiagID, bool AllowFold = true); ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result = nullptr);
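// Sketch (assumed usage): folding an array-bound expression through the ICE
// check above; `Actions` (a Sema &) and `SizeExpr` are assumed names:
//   llvm::APSInt Value;
//   ExprResult ICE = Actions.VerifyIntegerConstantExpression(SizeExpr, &Value);
//   if (ICE.isInvalid())
//     return ExprError();
/// VerifyBitField - verifies that a bit field expression is an ICE and has
/// the correct width, and that the field type is valid.
/// Returns false on success.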
/// Can optionally return whether the bit-field is of width 0 ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName, QualType FieldTy, bool IsMsStruct, Expr *BitWidth, bool *ZeroWidth = nullptr); private: unsigned ForceCUDAHostDeviceDepth = 0; public: /// Increments our count of the number of times we've seen a pragma forcing /// functions to be __host__ __device__. So long as this count is greater /// than zero, all functions encountered will be __host__ __device__. void PushForceCUDAHostDevice(); /// Decrements our count of the number of times we've seen a pragma forcing /// functions to be __host__ __device__. Returns false if the count is 0 /// before decrementing, so you can emit an error. bool PopForceCUDAHostDevice(); /// Diagnostics that are emitted only if we discover that the given function /// must be codegen'ed. Because handling these correctly adds overhead to /// compilation, this is currently only enabled for CUDA compilations. llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>, std::vector<PartialDiagnosticAt>> DeviceDeferredDiags; /// A pair of a canonical FunctionDecl and a SourceLocation. When used as the /// key in a hashtable, both the FD and location are hashed. struct FunctionDeclAndLoc { CanonicalDeclPtr<FunctionDecl> FD; SourceLocation Loc; }; /// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a /// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting the /// same deferred diag twice. llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags; /// An inverse call graph, mapping known-emitted functions to one of their /// known-emitted callers (plus the location of the call). /// /// Functions that we can tell a priori must be emitted aren't added to this /// map. llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>, /* Caller = */ FunctionDeclAndLoc> DeviceKnownEmittedFns; /// A partial call graph maintained during CUDA/OpenMP device code compilation /// to support deferred diagnostics. /// /// Functions are only added here if, at the time they're considered, they are /// not known-emitted. As soon as we discover that a function is /// known-emitted, we remove it and everything it transitively calls from this /// set and add those functions to DeviceKnownEmittedFns. llvm::DenseMap</* Caller = */ CanonicalDeclPtr<FunctionDecl>, /* Callees = */ llvm::MapVector<CanonicalDeclPtr<FunctionDecl>, SourceLocation>> DeviceCallGraph; /// Diagnostic builder for CUDA/OpenMP device errors which may or may not be /// deferred. /// /// In CUDA, there exist constructs (e.g. variable-length arrays, try/catch) /// which are not allowed to appear inside __device__ functions and are /// allowed to appear in __host__ __device__ functions only if the host+device /// function is never codegen'ed. /// /// To handle this, we use the notion of "deferred diagnostics", where we /// attach a diagnostic to a FunctionDecl that's emitted iff it's codegen'ed. /// /// This class lets you emit either a regular diagnostic, a deferred /// diagnostic, or no diagnostic at all, according to an argument you pass to /// its constructor, thus simplifying the process of creating these "maybe /// deferred" diagnostics. class DeviceDiagBuilder { public: enum Kind { /// Emit no diagnostics. K_Nop, /// Emit the diagnostic immediately (i.e., behave like Sema::Diag()).
K_Immediate, /// Emit the diagnostic immediately, and, if it's a warning or error, also /// emit a call stack showing how this function can be reached by an a /// priori known-emitted function. K_ImmediateWithCallStack, /// Create a deferred diagnostic, which is emitted only if the function /// it's attached to is codegen'ed. Also emit a call stack as with /// K_ImmediateWithCallStack. K_Deferred }; DeviceDiagBuilder(Kind K, SourceLocation Loc, unsigned DiagID, FunctionDecl *Fn, Sema &S); DeviceDiagBuilder(DeviceDiagBuilder &&D); DeviceDiagBuilder(const DeviceDiagBuilder &) = default; ~DeviceDiagBuilder(); /// Convertible to bool: True if we immediately emitted an error, false if /// we didn't emit an error or we created a deferred error. /// /// Example usage: /// /// if (DeviceDiagBuilder(...) << foo << bar) /// return ExprError(); /// /// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you probably /// want to use these instead of creating a DeviceDiagBuilder yourself. operator bool() const { return ImmediateDiag.hasValue(); } template <typename T> friend const DeviceDiagBuilder &operator<<(const DeviceDiagBuilder &Diag, const T &Value) { if (Diag.ImmediateDiag.hasValue()) *Diag.ImmediateDiag << Value; else if (Diag.PartialDiagId.hasValue()) Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second << Value; return Diag; } private: Sema &S; SourceLocation Loc; unsigned DiagID; FunctionDecl *Fn; bool ShowCallStack; // Invariant: At most one of these Optionals has a value. // FIXME: Switch these to a Variant once that exists. llvm::Optional<SemaDiagnosticBuilder> ImmediateDiag; llvm::Optional<unsigned> PartialDiagId; }; /// Indicate that this function (and thus everything it transitively calls) /// will be codegen'ed, and emit any deferred diagnostics on this function and /// its (transitive) callees. void markKnownEmitted( Sema &S, FunctionDecl *OrigCaller, FunctionDecl *OrigCallee, SourceLocation OrigLoc, const llvm::function_ref<bool(Sema &, FunctionDecl *)> IsKnownEmitted); /// Creates a DeviceDiagBuilder that emits the diagnostic if the current context /// is "used as device code". /// /// - If CurContext is a __host__ function, does not emit any diagnostics. /// - If CurContext is a __device__ or __global__ function, emits the /// diagnostics immediately. /// - If CurContext is a __host__ __device__ function and we are compiling for /// the device, creates a diagnostic which is emitted if and when we realize /// that the function will be codegen'ed. /// /// Example usage: /// /// // Variable-length arrays are not allowed in CUDA device code. /// if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget()) /// return ExprError(); /// // Otherwise, continue parsing as normal. DeviceDiagBuilder CUDADiagIfDeviceCode(SourceLocation Loc, unsigned DiagID); /// Creates a DeviceDiagBuilder that emits the diagnostic if the current context /// is "used as host code". /// /// Same as CUDADiagIfDeviceCode, with "host" and "device" switched. DeviceDiagBuilder CUDADiagIfHostCode(SourceLocation Loc, unsigned DiagID); /// Creates a DeviceDiagBuilder that emits the diagnostic if the current /// context is "used as device code". /// /// - If CurContext is a `declare target` function or it is known that the /// function is emitted for the device, emits the diagnostics immediately.
/// - If CurContext is a non-`declare target` function and we are compiling /// for the device, creates a diagnostic which is emitted if and when we /// realize that the function will be codegen'ed. /// /// Example usage: /// /// // Variable-length arrays are not allowed in NVPTX device code. /// if (diagIfOpenMPDeviceCode(Loc, diag::err_vla_unsupported)) /// return ExprError(); /// // Otherwise, continue parsing as normal. DeviceDiagBuilder diagIfOpenMPDeviceCode(SourceLocation Loc, unsigned DiagID); /// Creates a DeviceDiagBuilder that emits the diagnostic if the current /// context is "used as host code". /// /// - If CurContext is a `declare target` function or it is known that the /// function is emitted for the host, emits the diagnostics immediately. /// - If CurContext is a non-host function, just ignore it. /// /// Example usage: /// /// // Variable-length arrays are not allowed in NVPTX device code. /// if (diagIfOpenMPHostCode(Loc, diag::err_vla_unsupported)) /// return ExprError(); /// // Otherwise, continue parsing as normal. DeviceDiagBuilder diagIfOpenMPHostCode(SourceLocation Loc, unsigned DiagID); DeviceDiagBuilder targetDiag(SourceLocation Loc, unsigned DiagID); enum CUDAFunctionTarget { CFT_Device, CFT_Global, CFT_Host, CFT_HostDevice, CFT_InvalidTarget }; /// Determines whether the given function is a CUDA device/host/kernel/etc. /// function. /// /// Use this rather than examining the function's attributes yourself -- you /// will get it wrong. Returns CFT_Host if D is null. CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D, bool IgnoreImplicitHDAttr = false); CUDAFunctionTarget IdentifyCUDATarget(const ParsedAttributesView &Attrs); /// Gets the CUDA target for the current context. CUDAFunctionTarget CurrentCUDATarget() { return IdentifyCUDATarget(dyn_cast<FunctionDecl>(CurContext)); } // CUDA function call preference. Must be ordered numerically from // worst to best. enum CUDAFunctionPreference { CFP_Never, // Invalid caller/callee combination. CFP_WrongSide, // Calls from host-device to host or device // function that do not match current compilation // mode. CFP_HostDevice, // Any calls to host/device functions. CFP_SameSide, // Calls from host-device to host or device // function matching current compilation mode. CFP_Native, // host-to-host or device-to-device calls. }; /// Identifies relative preference of a given Caller/Callee /// combination, based on their host/device attributes. /// \param Caller function which needs address of \p Callee. /// nullptr in case of global context. /// \param Callee target function /// /// \returns preference value for particular Caller/Callee combination. CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller, const FunctionDecl *Callee); /// Determines whether Caller may invoke Callee, based on their CUDA /// host/device attributes. Returns false if the call is not allowed. /// /// Note: Will return true for CFP_WrongSide calls. These may appear in /// semantically correct CUDA programs, but only if they're never codegen'ed. bool IsAllowedCUDACall(const FunctionDecl *Caller, const FunctionDecl *Callee) { return IdentifyCUDAPreference(Caller, Callee) != CFP_Never; } /// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD, /// depending on FD and the current compilation settings. void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD, const LookupResult &Previous); public: /// Check whether we're allowed to call Callee from the current context.
/// /// - If the call is never allowed in a semantically-correct program /// (CFP_Never), emits an error and returns false. /// /// - If the call is allowed in semantically-correct programs, but only if /// it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to /// be emitted if and when the caller is codegen'ed, and returns true. /// /// Will only create deferred diagnostics for a given SourceLocation once, /// so you can safely call this multiple times without generating duplicate /// deferred errors. /// /// - Otherwise, returns true without emitting any diagnostics. bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee); /// Set __device__ or __host__ __device__ attributes on the given lambda /// operator() method. /// /// CUDA lambdas declared inside __device__ or __global__ functions inherit /// the __device__ attribute. Similarly, lambdas inside __host__ __device__ /// functions become __host__ __device__ themselves. void CUDASetLambdaAttrs(CXXMethodDecl *Method); /// Finds a function in \p Matches with highest calling priority /// from \p Caller context and erases all functions with lower /// calling priority. void EraseUnwantedCUDAMatches( const FunctionDecl *Caller, SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches); /// Given an implicit special member, infer its CUDA target from the /// calls it needs to make to underlying base/field special members. /// \param ClassDecl the class for which the member is being created. /// \param CSM the kind of special member. /// \param MemberDecl the special member itself. /// \param ConstRHS true if this is a copy operation with a const object on /// its RHS. /// \param Diagnose true if this call should emit diagnostics. /// \return true if there was an error inferring. /// The result of this call is implicit CUDA target attribute(s) attached to /// the member declaration. bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl, CXXSpecialMember CSM, CXXMethodDecl *MemberDecl, bool ConstRHS, bool Diagnose); /// \return true if \p CD can be considered empty according to CUDA /// (E.2.3.1 in CUDA 7.5 Programming guide). bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD); bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD); // \brief Checks that initializers of \p Var satisfy CUDA restrictions. In // case of error emits appropriate diagnostic and invalidates \p Var. // // \details CUDA allows only empty constructors as initializers for global // variables (see E.2.3.1, CUDA 7.5). The same restriction also applies to all // __shared__ variables whether they are local or not (they all are implicitly // static in CUDA). One exception is that CUDA allows constant initializers // for __constant__ and __device__ variables. void checkAllowedCUDAInitializer(VarDecl *VD); /// Check whether NewFD is a valid overload for CUDA. Emits /// diagnostics and invalidates NewFD if not. void checkCUDATargetOverload(FunctionDecl *NewFD, const LookupResult &Previous); /// Copies target attributes from the template TD to the function FD. void inheritCUDATargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD); /// Returns the name of the launch configuration function. This is the name /// of the function that will be called to configure the kernel call, with the /// parameters specified via <<<>>>. std::string getCudaConfigureFuncName() const;
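// A hedged sketch (not part of the original header): gating a call on the
// CUDA target rules enforced by CheckCUDACall above; `Actions`, `Loc` and
// `Callee` are assumed names:
//   if (!Actions.CheckCUDACall(Loc, Callee))
//     return ExprError(); // never-allowed caller/callee combination
/// \name Code completion
//@{
/// Describes the context in which code completion occurs.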
  /// \name Code completion
  //@{

  /// Describes the context in which code completion occurs.
  enum ParserCompletionContext {
    /// Code completion occurs at top-level or namespace context.
    PCC_Namespace,

    /// Code completion occurs within a class, struct, or union.
    PCC_Class,

    /// Code completion occurs within an Objective-C interface, protocol,
    /// or category.
    PCC_ObjCInterface,

    /// Code completion occurs within an Objective-C implementation or
    /// category implementation.
    PCC_ObjCImplementation,

    /// Code completion occurs within the list of instance variables
    /// in an Objective-C interface, protocol, category, or implementation.
    PCC_ObjCInstanceVariableList,

    /// Code completion occurs following one or more template
    /// headers.
    PCC_Template,

    /// Code completion occurs following one or more template
    /// headers within a class.
    PCC_MemberTemplate,

    /// Code completion occurs within an expression.
    PCC_Expression,

    /// Code completion occurs within a statement, which may
    /// also be an expression or a declaration.
    PCC_Statement,

    /// Code completion occurs at the beginning of the
    /// initialization statement (or expression) in a for loop.
    PCC_ForInit,

    /// Code completion occurs within the condition of an if,
    /// while, switch, or for statement.
    PCC_Condition,

    /// Code completion occurs within the body of a function on a
    /// recovery path, where we do not have a specific handle on our position
    /// in the grammar.
    PCC_RecoveryInFunction,

    /// Code completion occurs where only a type is permitted.
    PCC_Type,

    /// Code completion occurs in a parenthesized expression, which
    /// might also be a type cast.
    PCC_ParenthesizedExpression,

    /// Code completion occurs within a sequence of declaration
    /// specifiers within a function, method, or block.
    PCC_LocalDeclarationSpecifiers
  };

  void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path);
  void CodeCompleteOrdinaryName(Scope *S,
                                ParserCompletionContext CompletionContext);
  void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS, bool AllowNonIdentifiers,
                            bool AllowNestedNameSpecifiers);

  struct CodeCompleteExpressionData;
  void CodeCompleteExpression(Scope *S,
                              const CodeCompleteExpressionData &Data);
  void CodeCompleteExpression(Scope *S, QualType PreferredType,
                              bool IsParenthesized = false);
  void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, Expr *OtherOpBase,
                                       SourceLocation OpLoc, bool IsArrow,
                                       bool IsBaseExprStatement,
                                       QualType PreferredType);
  void CodeCompletePostfixExpression(Scope *S, ExprResult LHS,
                                     QualType PreferredType);
  void CodeCompleteTag(Scope *S, unsigned TagSpec);
  void CodeCompleteTypeQualifiers(DeclSpec &DS);
  void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D,
                                      const VirtSpecifiers *VS = nullptr);
  void CodeCompleteBracketDeclarator(Scope *S);
  void CodeCompleteCase(Scope *S);
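  // Illustrative sketch (an assumption about typical call sites, not code
  // from this header): the parser invokes these hooks at the point where
  // completion was requested, passing its current scope and a context kind,
  // e.g.
  //
  //   Actions.CodeCompleteOrdinaryName(getCurScope(), Sema::PCC_Statement);
  //
  // after which results are reported through the registered
  // CodeCompleteConsumer rather than returned to the parser.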
  /// Reports signatures for a call to CodeCompleteConsumer and returns the
  /// preferred type for the current argument. Returned type can be null.
  QualType ProduceCallSignatureHelp(Scope *S, Expr *Fn, ArrayRef<Expr *> Args,
                                    SourceLocation OpenParLoc);
  QualType ProduceConstructorSignatureHelp(Scope *S, QualType Type,
                                           SourceLocation Loc,
                                           ArrayRef<Expr *> Args,
                                           SourceLocation OpenParLoc);
  QualType ProduceCtorInitMemberSignatureHelp(Scope *S, Decl *ConstructorDecl,
                                              CXXScopeSpec SS,
                                              ParsedType TemplateTypeTy,
                                              ArrayRef<Expr *> ArgExprs,
                                              IdentifierInfo *II,
                                              SourceLocation OpenParLoc);

  void CodeCompleteInitializer(Scope *S, Decl *D);
  void CodeCompleteAfterIf(Scope *S);

  void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS,
                               bool EnteringContext, bool IsUsingDeclaration,
                               QualType BaseType, QualType PreferredType);
  void CodeCompleteUsing(Scope *S);
  void CodeCompleteUsingDirective(Scope *S);
  void CodeCompleteNamespaceDecl(Scope *S);
  void CodeCompleteNamespaceAliasDecl(Scope *S);
  void CodeCompleteOperatorName(Scope *S);
  void CodeCompleteConstructorInitializer(
      Decl *Constructor, ArrayRef<CXXCtorInitializer *> Initializers);

  void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro,
                                    bool AfterAmpersand);

  void CodeCompleteObjCAtDirective(Scope *S);
  void CodeCompleteObjCAtVisibility(Scope *S);
  void CodeCompleteObjCAtStatement(Scope *S);
  void CodeCompleteObjCAtExpression(Scope *S);
  void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS);
  void CodeCompleteObjCPropertyGetter(Scope *S);
  void CodeCompleteObjCPropertySetter(Scope *S);
  void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS,
                                   bool IsParameter);
  void CodeCompleteObjCMessageReceiver(Scope *S);
  void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc,
                                    ArrayRef<IdentifierInfo *> SelIdents,
                                    bool AtArgumentExpression);
  void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver,
                                    ArrayRef<IdentifierInfo *> SelIdents,
                                    bool AtArgumentExpression,
                                    bool IsSuper = false);
  void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
                                       ArrayRef<IdentifierInfo *> SelIdents,
                                       bool AtArgumentExpression,
                                       ObjCInterfaceDecl *Super = nullptr);
  void CodeCompleteObjCForCollection(Scope *S, DeclGroupPtrTy IterationVar);
  void CodeCompleteObjCSelector(Scope *S,
                                ArrayRef<IdentifierInfo *> SelIdents);
  void CodeCompleteObjCProtocolReferences(
      ArrayRef<IdentifierLocPair> Protocols);
  void CodeCompleteObjCProtocolDecl(Scope *S);
  void CodeCompleteObjCInterfaceDecl(Scope *S);
  void CodeCompleteObjCSuperclass(Scope *S, IdentifierInfo *ClassName,
                                  SourceLocation ClassNameLoc);
  void CodeCompleteObjCImplementationDecl(Scope *S);
  void CodeCompleteObjCInterfaceCategory(Scope *S, IdentifierInfo *ClassName,
                                         SourceLocation ClassNameLoc);
  void CodeCompleteObjCImplementationCategory(Scope *S,
                                              IdentifierInfo *ClassName,
                                              SourceLocation ClassNameLoc);
  void CodeCompleteObjCPropertyDefinition(Scope *S);
  void CodeCompleteObjCPropertySynthesizeIvar(Scope *S,
                                              IdentifierInfo *PropertyName);
  void CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod,
                                  ParsedType ReturnType);
  void CodeCompleteObjCMethodDeclSelector(
      Scope *S, bool IsInstanceMethod, bool AtParameterName,
      ParsedType ReturnType, ArrayRef<IdentifierInfo *> SelIdents);
  void CodeCompleteObjCClassPropertyRefExpr(Scope *S,
                                            IdentifierInfo &ClassName,
                                            SourceLocation ClassNameLoc,
                                            bool IsBaseExprStatement);
  void CodeCompletePreprocessorDirective(bool InConditional);
  void CodeCompleteInPreprocessorConditionalExclusion(Scope *S);
  void CodeCompletePreprocessorMacroName(bool IsDefinition);
  void CodeCompletePreprocessorExpression();
  void CodeCompletePreprocessorMacroArgument(Scope *S, IdentifierInfo *Macro,
                                             MacroInfo *MacroInfo,
                                             unsigned Argument);
  void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled);
  void CodeCompleteNaturalLanguage();
  void CodeCompleteAvailabilityPlatformName();
  void GatherGlobalCodeCompletions(
      CodeCompletionAllocator &Allocator, CodeCompletionTUInfo &CCTUInfo,
      SmallVectorImpl<CodeCompletionResult> &Results);
  //@}

  //===--------------------------------------------------------------------===//
  // Extra semantic analysis beyond the C type system

public:
  SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL,
                                                unsigned ByteNo) const;

private:
  void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
                        const ArraySubscriptExpr *ASE = nullptr,
                        bool AllowOnePastEnd = true,
                        bool IndexNegated = false);
  void CheckArrayAccess(const Expr *E);

  // Used to grab the relevant information from a FormatAttr and a
  // FunctionDeclaration.
  struct FormatStringInfo {
    unsigned FormatIdx;
    unsigned FirstDataArg;
    bool HasVAListArg;
  };

  static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
                                  FormatStringInfo *FSI);
  bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
                         const FunctionProtoType *Proto);
  bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc,
                           ArrayRef<const Expr *> Args);
  bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall,
                        const FunctionProtoType *Proto);
  bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto);
  void CheckConstructorCall(FunctionDecl *FDecl, ArrayRef<const Expr *> Args,
                            const FunctionProtoType *Proto,
                            SourceLocation Loc);

  void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
                 const Expr *ThisArg, ArrayRef<const Expr *> Args,
                 bool IsMemberFunction, SourceLocation Loc, SourceRange Range,
                 VariadicCallType CallType);

  bool CheckObjCString(Expr *Arg);
  ExprResult CheckOSLogFormatStringArg(Expr *Arg);

  ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
                                      CallExpr *TheCall);

  void checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD,
                                           CallExpr *TheCall);

  bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
                                    unsigned MaxWidth);
  bool CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
  bool CheckMVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
  bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
  bool CheckAArch64BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
  bool CheckBPFBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
  bool CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
  bool CheckHexagonBuiltinCpu(unsigned BuiltinID, CallExpr *TheCall);
  bool CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall);
  bool CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
  bool CheckMipsBuiltinCpu(unsigned BuiltinID, CallExpr *TheCall);
  bool CheckMipsBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall);
  bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
  bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall);
  bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID,
                                         CallExpr *TheCall);
  bool CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
  bool CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);

  bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall);
  bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call);
  bool SemaBuiltinUnorderedCompare(CallExpr *TheCall);
  bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs);
  bool SemaBuiltinVSX(CallExpr *TheCall);
  bool SemaBuiltinOSLogFormat(CallExpr *TheCall);

public:
  // Used by C++ template instantiation.
  ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall);
  ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo,
                                   SourceLocation BuiltinLoc,
                                   SourceLocation RParenLoc);

private:
  bool SemaBuiltinPrefetch(CallExpr *TheCall);
  bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall);
  bool SemaBuiltinAssume(CallExpr *TheCall);
  bool SemaBuiltinAssumeAligned(CallExpr *TheCall);
  bool SemaBuiltinLongjmp(CallExpr *TheCall);
  bool SemaBuiltinSetjmp(CallExpr *TheCall);
  ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult);
  ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult);
  ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult,
                                     AtomicExpr::AtomicOp Op);
  ExprResult SemaBuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult,
                                                    bool IsDelete);
  bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
                              llvm::APSInt &Result);
  bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low,
                                   int High, bool RangeIsError = true);
  bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum,
                                      unsigned Multiple);
  bool SemaBuiltinConstantArgPower2(CallExpr *TheCall, int ArgNum);
  bool SemaBuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum);
  bool SemaBuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall, int ArgNum);
  bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
                                int ArgNum, unsigned ExpectedFieldNum,
                                bool AllowName);
  bool SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall);

public:
  enum FormatStringType {
    FST_Scanf,
    FST_Printf,
    FST_NSString,
    FST_Strftime,
    FST_Strfmon,
    FST_Kprintf,
    FST_FreeBSDKPrintf,
    FST_OSTrace,
    FST_OSLog,
    FST_Unknown
  };
  static FormatStringType GetFormatStringType(const FormatAttr *Format);

  bool FormatStringHasSArg(const StringLiteral *FExpr);

  static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx);

private:
  bool CheckFormatArguments(const FormatAttr *Format,
                            ArrayRef<const Expr *> Args, bool IsCXXMember,
                            VariadicCallType CallType, SourceLocation Loc,
                            SourceRange Range,
                            llvm::SmallBitVector &CheckedVarArgs);
  bool CheckFormatArguments(ArrayRef<const Expr *> Args, bool HasVAListArg,
                            unsigned format_idx, unsigned firstDataArg,
                            FormatStringType Type, VariadicCallType CallType,
                            SourceLocation Loc, SourceRange range,
                            llvm::SmallBitVector &CheckedVarArgs);

  void CheckAbsoluteValueFunction(const CallExpr *Call,
                                  const FunctionDecl *FDecl);

  void CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl);

  void CheckMemaccessArguments(const CallExpr *Call, unsigned BId,
                               IdentifierInfo *FnName);

  void CheckStrlcpycatArguments(const CallExpr *Call,
                                IdentifierInfo *FnName);

  void CheckStrncatArguments(const CallExpr *Call, IdentifierInfo *FnName);

  void CheckReturnValExpr(Expr *RetValExp, QualType lhsType,
                          SourceLocation ReturnLoc, bool isObjCMethod = false,
                          const AttrVec *Attrs = nullptr,
                          const FunctionDecl *FD = nullptr);

public:
  void CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS);

private:
  void CheckImplicitConversions(Expr *E,
                                SourceLocation CC = SourceLocation());
  void CheckBoolLikeConversion(Expr *E, SourceLocation CC);
  void CheckForIntOverflow(Expr *E);
  void CheckUnsequencedOperations(Expr *E);
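  // Illustrative sketch (an assumption about what the checker above flags,
  // not code from this header): CheckUnsequencedOperations diagnoses
  // expressions whose side effects are unsequenced relative to each other,
  // e.g.
  //
  //   int i = 0;
  //   i = i++ + 1;  // warning: multiple unsequenced modifications to 'i'
  //
  // where the assignment and the increment both modify 'i' without an
  // intervening sequence point.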
  /// Perform semantic checks on a completed expression. This will either
  /// be a full-expression or a default argument expression.
  void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(),
                          bool IsConstexpr = false);

  void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field,
                                   Expr *Init);

  /// Check if there is a field shadowing.
  void CheckShadowInheritedFields(const SourceLocation &Loc,
                                  DeclarationName FieldName,
                                  const CXXRecordDecl *RD,
                                  bool DeclIsField = true);

  /// Check if the given expression contains 'break' or 'continue'
  /// statement that produces control flow different from GCC.
  void CheckBreakContinueBinding(Expr *E);

  /// Check whether receiver is mutable ObjC container which
  /// attempts to add itself into the container.
  void CheckObjCCircularContainer(ObjCMessageExpr *Message);

  void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE);
  void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc,
                                 bool DeleteWasArrayForm);

public:
  /// Register a magic integral constant to be used as a type tag.
  void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
                                  uint64_t MagicValue, QualType Type,
                                  bool LayoutCompatible, bool MustBeNull);

  struct TypeTagData {
    TypeTagData() {}

    TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull)
        : Type(Type), LayoutCompatible(LayoutCompatible),
          MustBeNull(MustBeNull) {}

    QualType Type;

    /// If true, \c Type should be compared with other expression's types for
    /// layout-compatibility.
    unsigned LayoutCompatible : 1;
    unsigned MustBeNull : 1;
  };

  /// A pair of ArgumentKind identifier and magic value. This uniquely
  /// identifies the magic value.
  typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue;

private:
  /// A map from magic value to type information.
  std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>>
      TypeTagForDatatypeMagicValues;

  /// Perform checks on a call to a function with argument_with_type_tag
  /// or pointer_with_type_tag attributes.
  void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
                                const ArrayRef<const Expr *> ExprArgs,
                                SourceLocation CallSiteLoc);

  /// Check if we are taking the address of a packed field
  /// as this may be a problem if the pointer value is dereferenced.
  void CheckAddressOfPackedMember(Expr *rhs);

  /// The parser's current scope.
  ///
  /// The parser maintains this state here.
  Scope *CurScope;

  mutable IdentifierInfo *Ident_super;
  mutable IdentifierInfo *Ident___float128;

  /// Nullability type specifiers.
  IdentifierInfo *Ident__Nonnull = nullptr;
  IdentifierInfo *Ident__Nullable = nullptr;
  IdentifierInfo *Ident__Null_unspecified = nullptr;

  IdentifierInfo *Ident_NSError = nullptr;

  /// The handler for the FileChanged preprocessor events.
  ///
  /// Used for diagnostics that implement custom semantic analysis for
  /// #include directives, like -Wpragma-pack.
  sema::SemaPPCallbacks *SemaPPCallbackHandler;

protected:
  friend class Parser;
  friend class InitializationSequence;
  friend class ASTReader;
  friend class ASTDeclReader;
  friend class ASTWriter;

public:
  /// Retrieve the keyword associated with the given nullability kind.
  IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability);

  /// The struct behind the CFErrorRef pointer.
  RecordDecl *CFError = nullptr;
  bool isCFError(RecordDecl *D);

  /// Retrieve the identifier "NSError".
  IdentifierInfo *getNSErrorIdent();
  /// Retrieve the parser's current scope.
  ///
  /// This routine must only be used when it is certain that semantic analysis
  /// and the parser are in precisely the same context, which is not the case
  /// when, e.g., we are performing any kind of template instantiation.
  /// Therefore, the only safe places to use this scope are in the parser
  /// itself and in routines directly invoked from the parser and *never* from
  /// template substitution or instantiation.
  Scope *getCurScope() const { return CurScope; }

  void incrementMSManglingNumber() const {
    return CurScope->incrementMSManglingNumber();
  }

  IdentifierInfo *getSuperIdentifier() const;
  IdentifierInfo *getFloat128Identifier() const;

  Decl *getObjCDeclContext() const;

  DeclContext *getCurLexicalContext() const {
    return OriginalLexicalContext ? OriginalLexicalContext : CurContext;
  }

  const DeclContext *getCurObjCLexicalContext() const {
    const DeclContext *DC = getCurLexicalContext();
    // A category implicitly has the attribute of the interface.
    if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC))
      DC = CatD->getClassInterface();
    return DC;
  }

  /// To be used for checking whether the number of arguments being passed
  /// to a function exceeds the number of parameters it expects.
  static bool TooManyArguments(size_t NumParams, size_t NumArgs,
                               bool PartialOverloading = false) {
    // We check whether we're just after a comma in code-completion.
    if (NumArgs > 0 && PartialOverloading)
      return NumArgs + 1 > NumParams; // If so, we view it as an extra
                                      // argument.
    return NumArgs > NumParams;
  }

  // Emitting members of dllexported classes is delayed until the class
  // (including field initializers) is fully parsed.
  SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses;
  SmallVector<CXXMethodDecl*, 4> DelayedDllExportMemberFunctions;

private:
  int ParsingClassDepth = 0;

  class SavePendingParsedClassStateRAII {
  public:
    SavePendingParsedClassStateRAII(Sema &S) : S(S) { swapSavedState(); }

    ~SavePendingParsedClassStateRAII() {
      assert(S.DelayedOverridingExceptionSpecChecks.empty() &&
             "there shouldn't be any pending delayed exception spec checks");
      assert(S.DelayedEquivalentExceptionSpecChecks.empty() &&
             "there shouldn't be any pending delayed exception spec checks");
      swapSavedState();
    }

  private:
    Sema &S;
    decltype(DelayedOverridingExceptionSpecChecks)
        SavedOverridingExceptionSpecChecks;
    decltype(DelayedEquivalentExceptionSpecChecks)
        SavedEquivalentExceptionSpecChecks;

    void swapSavedState() {
      SavedOverridingExceptionSpecChecks.swap(
          S.DelayedOverridingExceptionSpecChecks);
      SavedEquivalentExceptionSpecChecks.swap(
          S.DelayedEquivalentExceptionSpecChecks);
    }
  };

  /// Helper class that collects misaligned member designations and
  /// their location info for delayed diagnostics.
  struct MisalignedMember {
    Expr *E;
    RecordDecl *RD;
    ValueDecl *MD;
    CharUnits Alignment;

    MisalignedMember() : E(), RD(), MD(), Alignment() {}
    MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD,
                     CharUnits Alignment)
        : E(E), RD(RD), MD(MD), Alignment(Alignment) {}
    explicit MisalignedMember(Expr *E)
        : MisalignedMember(E, nullptr, nullptr, CharUnits()) {}

    bool operator==(const MisalignedMember &m) { return this->E == m.E; }
  };

  /// Small set of gathered accesses to potentially misaligned members
  /// due to the packed attribute.
  SmallVector<MisalignedMember, 4> MisalignedMembers;

  /// Adds an expression to the set of gathered misaligned members.
  void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD,
                                     CharUnits Alignment);

public:
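  // Illustrative sketch (an assumption about the typical flow, not code from
  // this header): taking the address of a member of a packed struct feeds
  // the machinery above, e.g.
  //
  //   struct __attribute__((packed)) P { char c; int i; };
  //   int *q = &p->i;  // gathered via AddPotentialMisalignedMembers, then
  //                    // diagnosed by DiagnoseMisalignedMembers at the end
  //                    // of the full-expression unless discarded first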
  /// Diagnoses the current set of gathered accesses. This typically
  /// happens at full expression level. The set is cleared after emitting the
  /// diagnostics.
  void DiagnoseMisalignedMembers();

  /// This function checks if the expression is in the set of potentially
  /// misaligned members and is being converted to some pointer type T with
  /// lower or equal alignment requirements. If so, it removes it from the
  /// set. This is used when we do not want to diagnose such misaligned access
  /// (e.g. in conversions to void*).
  void DiscardMisalignedMemberAddress(const Type *T, Expr *E);

  /// This function calls Action when it determines that E designates a
  /// misaligned member due to the packed attribute. This is used to emit
  /// local diagnostics like in reference binding.
  void RefersToMemberWithReducedAlignment(
      Expr *E,
      llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)>
          Action);

  /// Describes the reason a calling convention specification was ignored,
  /// used for diagnostics.
  enum class CallingConventionIgnoredReason {
    ForThisTarget = 0,
    VariadicFunction,
    ConstructorDestructor,
    BuiltinFunction
  };
};

/// RAII object that enters a new expression evaluation context.
class EnterExpressionEvaluationContext {
  Sema &Actions;
  bool Entered = true;

public:
  EnterExpressionEvaluationContext(
      Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
      Decl *LambdaContextDecl = nullptr,
      Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
          Sema::ExpressionEvaluationContextRecord::EK_Other,
      bool ShouldEnter = true)
      : Actions(Actions), Entered(ShouldEnter) {
    if (Entered)
      Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl,
                                              ExprContext);
  }
  EnterExpressionEvaluationContext(
      Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
      Sema::ReuseLambdaContextDecl_t,
      Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
          Sema::ExpressionEvaluationContextRecord::EK_Other)
      : Actions(Actions) {
    Actions.PushExpressionEvaluationContext(
        NewContext, Sema::ReuseLambdaContextDecl, ExprContext);
  }

  enum InitListTag { InitList };
  EnterExpressionEvaluationContext(Sema &Actions, InitListTag,
                                   bool ShouldEnter = true)
      : Actions(Actions), Entered(false) {
    // In C++11 onwards, narrowing checks are performed on the contents of
    // braced-init-lists, even when they occur within unevaluated operands.
    // Therefore we still need to instantiate constexpr functions used in such
    // a context.
    if (ShouldEnter && Actions.isUnevaluatedContext() &&
        Actions.getLangOpts().CPlusPlus11) {
      Actions.PushExpressionEvaluationContext(
          Sema::ExpressionEvaluationContext::UnevaluatedList);
      Entered = true;
    }
  }

  ~EnterExpressionEvaluationContext() {
    if (Entered)
      Actions.PopExpressionEvaluationContext();
  }
};

DeductionFailureInfo
MakeDeductionFailureInfo(ASTContext &Context,
                         Sema::TemplateDeductionResult TDK,
                         sema::TemplateDeductionInfo &Info);

/// Contains a late templated function.
/// Will be parsed at the end of the translation unit, used by Sema & Parser.
struct LateParsedTemplate {
  CachedTokens Toks;

  /// The template function declaration to be late parsed.
  Decl *D;
};

} // end namespace clang
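// Illustrative sketch (an assumption about a typical call site, not code from
// this header): the parser uses the RAII class above to bracket regions that
// must not be evaluated, e.g. while parsing the operand of sizeof:
//
//   EnterExpressionEvaluationContext Unevaluated(
//       Actions, Sema::ExpressionEvaluationContext::Unevaluated);
//   ExprResult Operand = ParseExpression();
//   // the context is popped automatically when Unevaluated leaves scope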
namespace llvm {

// Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its
// SourceLocation.
template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> {
  using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc;
  using FDBaseInfo =
      DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>;

  static FunctionDeclAndLoc getEmptyKey() {
    return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()};
  }

  static FunctionDeclAndLoc getTombstoneKey() {
    return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()};
  }

  static unsigned getHashValue(const FunctionDeclAndLoc &FDL) {
    return hash_combine(FDBaseInfo::getHashValue(FDL.FD),
                        FDL.Loc.getRawEncoding());
  }

  static bool isEqual(const FunctionDeclAndLoc &LHS,
                      const FunctionDeclAndLoc &RHS) {
    return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc;
  }
};

} // namespace llvm

#endif